[apple/xnu.git] / bsd / kern / ubc_subr.c (xnu-4903.270.47)
1c79356b 1/*
fe8ab488 2 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
0a7de745 28/*
1c79356b
A
29 * File: ubc_subr.c
30 * Author: Umesh Vaishampayan [umeshv@apple.com]
31 * 05-Aug-1999 umeshv Created.
32 *
33 * Functions related to Unified Buffer cache.
34 *
0b4e3aa0
A
35 * Caller of UBC functions MUST have a valid reference on the vnode.
36 *
0a7de745 37 */
1c79356b 38
1c79356b
A
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/lock.h>
91447636
A
43#include <sys/mman.h>
44#include <sys/mount_internal.h>
45#include <sys/vnode_internal.h>
46#include <sys/ubc_internal.h>
1c79356b 47#include <sys/ucred.h>
91447636
A
48#include <sys/proc_internal.h>
49#include <sys/kauth.h>
1c79356b 50#include <sys/buf.h>
13fec989 51#include <sys/user.h>
2d21ac55 52#include <sys/codesign.h>
fe8ab488
A
53#include <sys/codedir_internal.h>
54#include <sys/fsevents.h>
c18c124e 55#include <sys/fcntl.h>
1c79356b
A
56
57#include <mach/mach_types.h>
58#include <mach/memory_object_types.h>
91447636
A
59#include <mach/memory_object_control.h>
60#include <mach/vm_map.h>
b0d623f7 61#include <mach/mach_vm.h>
91447636 62#include <mach/upl.h>
1c79356b 63
91447636 64#include <kern/kern_types.h>
2d21ac55 65#include <kern/kalloc.h>
1c79356b 66#include <kern/zalloc.h>
13fec989 67#include <kern/thread.h>
5ba3f43e 68#include <vm/pmap.h>
91447636
A
69#include <vm/vm_kern.h>
70#include <vm/vm_protos.h> /* last */
1c79356b 71
2d21ac55 72#include <libkern/crypto/sha1.h>
3e170ce0 73#include <libkern/crypto/sha2.h>
39236c6e
A
74#include <libkern/libkern.h>
75
593a1d5f 76#include <security/mac_framework.h>
fe8ab488 77#include <stdbool.h>
593a1d5f 78
2d21ac55
A
79/* XXX These should be in a BSD accessible Mach header, but aren't. */
80extern kern_return_t memory_object_pages_resident(memory_object_control_t,
0a7de745
A
81 boolean_t *);
82extern kern_return_t memory_object_signed(memory_object_control_t control,
83 boolean_t is_signed);
84extern boolean_t memory_object_is_signed(memory_object_control_t);
6d2010ae 85
d9a64523
A
86/* XXX Same for those. */
87
2d21ac55
A
88extern void Debugger(const char *message);
89
90
91/* XXX no one uses this interface! */
92kern_return_t ubc_page_op_with_control(
0a7de745
A
93 memory_object_control_t control,
94 off_t f_offset,
95 int ops,
96 ppnum_t *phys_entryp,
97 int *flagsp);
2d21ac55
A
98
99
1c79356b
A
100#if DIAGNOSTIC
101#if defined(assert)
b0d623f7 102#undef assert
1c79356b
A
103#endif
104#define assert(cond) \
2d21ac55 105 ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
1c79356b
A
106#else
107#include <kern/assert.h>
108#endif /* DIAGNOSTIC */
109
2d21ac55 110static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
0c530ab8 111static int ubc_umcallback(vnode_t, void *);
0c530ab8 112static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
2d21ac55 113static void ubc_cs_free(struct ubc_info *uip);
b4c24cb9 114
39037602 115static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
d9a64523 116static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
39037602 117
0a7de745
A
118struct zone *ubc_info_zone;
119static uint32_t cs_blob_generation_count = 1;
2d21ac55
A
120
121/*
122 * CODESIGNING
123 * Routines to navigate code signing data structures in the kernel...
124 */
b0d623f7
A
125
126extern int cs_debug;
127
0a7de745 128#define PAGE_SHIFT_4K (12)
fe8ab488 129
2d21ac55
A
130static boolean_t
131cs_valid_range(
132 const void *start,
133 const void *end,
134 const void *lower_bound,
135 const void *upper_bound)
136{
137 if (upper_bound < lower_bound ||
138 end < start) {
139 return FALSE;
140 }
141
142 if (start < lower_bound ||
143 end > upper_bound) {
144 return FALSE;
145 }
146
147 return TRUE;
148}
149
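/*
 * Illustrative sketch (not part of the original file): the cs_valid_range()
 * pattern used throughout this file.  Before any structure embedded in an
 * untrusted, memory-mapped blob is dereferenced, its full extent is checked
 * against the blob's bounds.  The helper name is hypothetical.
 */
#if 0 /* example only */
static const CS_GenericBlob *
example_blob_at_offset(const char *blob_base, size_t blob_size, size_t offset)
{
	const char *lower_bound = blob_base;
	const char *upper_bound = blob_base + blob_size;
	const CS_GenericBlob *candidate =
	    (const CS_GenericBlob *)(const void *)(blob_base + offset);

	/* reject the pointer unless the whole generic header fits in the blob */
	if (!cs_valid_range(candidate, candidate + 1, lower_bound, upper_bound)) {
		return NULL;
	}
	return candidate;
}
#endif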
3e170ce0
A
150typedef void (*cs_md_init)(void *ctx);
151typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
152typedef void (*cs_md_final)(void *hash, void *ctx);
153
154struct cs_hash {
0a7de745
A
155 uint8_t cs_type; /* type code as per code signing */
156 size_t cs_size; /* size of effective hash (may be truncated) */
157 size_t cs_digest_size;/* size of native hash */
158 cs_md_init cs_init;
159 cs_md_update cs_update;
160 cs_md_final cs_final;
3e170ce0
A
161};
162
0a7de745
A
163uint8_t
164cs_hash_type(
165 struct cs_hash const * const cs_hash)
5ba3f43e 166{
0a7de745 167 return cs_hash->cs_type;
5ba3f43e
A
168}
169
d190cdc3 170static const struct cs_hash cs_hash_sha1 = {
0a7de745
A
171 .cs_type = CS_HASHTYPE_SHA1,
172 .cs_size = CS_SHA1_LEN,
173 .cs_digest_size = SHA_DIGEST_LENGTH,
174 .cs_init = (cs_md_init)SHA1Init,
175 .cs_update = (cs_md_update)SHA1Update,
176 .cs_final = (cs_md_final)SHA1Final,
3e170ce0
A
177};
178#if CRYPTO_SHA2
d190cdc3 179static const struct cs_hash cs_hash_sha256 = {
0a7de745
A
180 .cs_type = CS_HASHTYPE_SHA256,
181 .cs_size = SHA256_DIGEST_LENGTH,
182 .cs_digest_size = SHA256_DIGEST_LENGTH,
183 .cs_init = (cs_md_init)SHA256_Init,
184 .cs_update = (cs_md_update)SHA256_Update,
185 .cs_final = (cs_md_final)SHA256_Final,
3e170ce0 186};
d190cdc3 187static const struct cs_hash cs_hash_sha256_truncate = {
0a7de745
A
188 .cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
189 .cs_size = CS_SHA256_TRUNCATED_LEN,
190 .cs_digest_size = SHA256_DIGEST_LENGTH,
191 .cs_init = (cs_md_init)SHA256_Init,
192 .cs_update = (cs_md_update)SHA256_Update,
193 .cs_final = (cs_md_final)SHA256_Final,
3e170ce0 194};
d190cdc3 195static const struct cs_hash cs_hash_sha384 = {
0a7de745
A
196 .cs_type = CS_HASHTYPE_SHA384,
197 .cs_size = SHA384_DIGEST_LENGTH,
198 .cs_digest_size = SHA384_DIGEST_LENGTH,
199 .cs_init = (cs_md_init)SHA384_Init,
200 .cs_update = (cs_md_update)SHA384_Update,
201 .cs_final = (cs_md_final)SHA384_Final,
490019cf 202};
3e170ce0 203#endif
39037602 204
d190cdc3 205static struct cs_hash const *
3e170ce0
A
206cs_find_md(uint8_t type)
207{
208 if (type == CS_HASHTYPE_SHA1) {
209 return &cs_hash_sha1;
210#if CRYPTO_SHA2
211 } else if (type == CS_HASHTYPE_SHA256) {
212 return &cs_hash_sha256;
213 } else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
214 return &cs_hash_sha256_truncate;
490019cf
A
215 } else if (type == CS_HASHTYPE_SHA384) {
216 return &cs_hash_sha384;
3e170ce0
A
217#endif
218 }
219 return NULL;
220}
221
222union cs_hash_union {
0a7de745
A
223 SHA1_CTX sha1ctxt;
224 SHA256_CTX sha256ctx;
225 SHA384_CTX sha384ctx;
3e170ce0
A
226};
227
228
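/*
 * Illustrative sketch (not part of the original file): how the cs_hash
 * dispatch table above is typically driven.  A caller looks up the
 * descriptor for a code-signing hash type with cs_find_md() and then uses
 * the cs_init/cs_update/cs_final function pointers over a shared context
 * union.  The helper name is hypothetical.
 */
#if 0 /* example only */
static int
example_cs_digest(uint8_t type, const void *data, size_t size,
    uint8_t out[CS_HASH_MAX_SIZE], size_t *out_len)
{
	struct cs_hash const *hashtype = cs_find_md(type);
	union cs_hash_union ctx;

	if (hashtype == NULL || hashtype->cs_digest_size > CS_HASH_MAX_SIZE) {
		return EBADEXEC;        /* unsupported or oversized hash type */
	}

	hashtype->cs_init(&ctx);
	hashtype->cs_update(&ctx, data, size);
	hashtype->cs_final(out, &ctx);

	/* cs_size may be a truncation of the native digest (SHA256_TRUNCATED) */
	*out_len = hashtype->cs_size;
	return 0;
}
#endif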
2d21ac55 229/*
490019cf
A
230 * Choose among different hash algorithms.
231 * Higher is better, 0 => don't use at all.
2d21ac55 232 */
d190cdc3 233static const uint32_t hashPriorities[] = {
490019cf
A
234 CS_HASHTYPE_SHA1,
235 CS_HASHTYPE_SHA256_TRUNCATED,
236 CS_HASHTYPE_SHA256,
237 CS_HASHTYPE_SHA384,
238};
b0d623f7 239
490019cf
A
240static unsigned int
241hash_rank(const CS_CodeDirectory *cd)
242{
243 uint32_t type = cd->hashType;
244 unsigned int n;
2d21ac55 245
0a7de745
A
246 for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
247 if (hashPriorities[n] == type) {
490019cf 248 return n + 1;
0a7de745
A
249 }
250 }
251 return 0; /* not supported */
2d21ac55
A
252}
253
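/*
 * Illustrative sketch (not part of the original file): how hash_rank() is
 * used to pick between two candidate CodeDirectories, as
 * cs_validate_csblob() does while walking a SuperBlob.  The helper name is
 * hypothetical.
 */
#if 0 /* example only */
static const CS_CodeDirectory *
example_pick_stronger_cd(const CS_CodeDirectory *a, const CS_CodeDirectory *b)
{
	unsigned int rank_a = hash_rank(a);
	unsigned int rank_b = hash_rank(b);

	/* rank 0 means "hash type not supported, do not use at all" */
	if (rank_a == 0 && rank_b == 0) {
		return NULL;
	}
	return (rank_a >= rank_b) ? a : b;
}
#endif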
254
255/*
256 * Locating a page hash
257 */
258static const unsigned char *
259hashes(
260 const CS_CodeDirectory *cd,
3e170ce0
A
261 uint32_t page,
262 size_t hash_len,
263 const char *lower_bound,
264 const char *upper_bound)
2d21ac55
A
265{
266 const unsigned char *base, *top, *hash;
b0d623f7 267 uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
2d21ac55
A
268
269 assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
270
0a7de745 271 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
b0d623f7 272 /* Get first scatter struct */
39236c6e 273 const SC_Scatter *scatter = (const SC_Scatter*)
0a7de745
A
274 ((const char*)cd + ntohl(cd->scatterOffset));
275 uint32_t hashindex = 0, scount, sbase = 0;
b0d623f7
A
276 /* iterate all scatter structs */
277 do {
0a7de745
A
278 if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
279 if (cs_debug) {
b0d623f7
A
280 printf("CODE SIGNING: Scatter extends past Code Directory\n");
281 }
282 return NULL;
283 }
0a7de745 284
b0d623f7
A
285 scount = ntohl(scatter->count);
286 uint32_t new_base = ntohl(scatter->base);
287
288 /* last scatter? */
289 if (scount == 0) {
290 return NULL;
291 }
0a7de745
A
292
293 if ((hashindex > 0) && (new_base <= sbase)) {
294 if (cs_debug) {
b0d623f7 295 printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
0a7de745 296 sbase, new_base);
b0d623f7 297 }
0a7de745 298 return NULL; /* unordered scatter array */
b0d623f7
A
299 }
300 sbase = new_base;
301
302 /* this scatter beyond page we're looking for? */
303 if (sbase > page) {
304 return NULL;
305 }
0a7de745
A
306
307 if (sbase + scount >= page) {
308 /* Found the scatter struct that is
b0d623f7
A
309 * referencing our page */
310
311 /* base = address of first hash covered by scatter */
0a7de745
A
312 base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
313 hashindex * hash_len;
b0d623f7 314 /* top = address of first hash after this scatter */
3e170ce0 315 top = base + scount * hash_len;
0a7de745
A
316 if (!cs_valid_range(base, top, lower_bound,
317 upper_bound) ||
b0d623f7
A
318 hashindex > nCodeSlots) {
319 return NULL;
320 }
0a7de745 321
b0d623f7
A
322 break;
323 }
0a7de745
A
324
325 /* this scatter struct is before the page we're looking
b0d623f7 326 * for. Iterate. */
0a7de745 327 hashindex += scount;
b0d623f7 328 scatter++;
0a7de745
A
329 } while (1);
330
3e170ce0 331 hash = base + (page - sbase) * hash_len;
b0d623f7
A
332 } else {
333 base = (const unsigned char *)cd + ntohl(cd->hashOffset);
3e170ce0 334 top = base + nCodeSlots * hash_len;
b0d623f7
A
335 if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
336 page > nCodeSlots) {
337 return NULL;
338 }
339 assert(page < nCodeSlots);
2d21ac55 340
3e170ce0 341 hash = base + page * hash_len;
b0d623f7 342 }
0a7de745 343
3e170ce0 344 if (!cs_valid_range(hash, hash + hash_len,
0a7de745 345 lower_bound, upper_bound)) {
2d21ac55
A
346 hash = NULL;
347 }
348
349 return hash;
350}
39236c6e
A
351
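/*
 * Illustrative sketch (not part of the original file): typical use of
 * hashes().  A verifier digests one signed page and compares the result
 * against the code slot that hashes() locates for that page index.  The
 * hash descriptor and blob bounds would come from the owning struct
 * cs_blob; the helper name is hypothetical.
 */
#if 0 /* example only */
static boolean_t
example_page_matches_cd(
	const CS_CodeDirectory *cd,
	struct cs_hash const *hashtype,
	const char *lower_bound,
	const char *upper_bound,
	uint32_t page,
	const void *page_data,
	size_t page_size)
{
	union cs_hash_union ctx;
	unsigned char actual[CS_HASH_MAX_SIZE];
	const unsigned char *expected;

	expected = hashes(cd, page, hashtype->cs_size, lower_bound, upper_bound);
	if (expected == NULL) {
		return FALSE;   /* page not covered by this CodeDirectory */
	}

	hashtype->cs_init(&ctx);
	hashtype->cs_update(&ctx, page_data, page_size);
	hashtype->cs_final(actual, &ctx);

	return (memcmp(expected, actual, hashtype->cs_size) == 0) ? TRUE : FALSE;
}
#endif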
352/*
353 * cs_validate_codedirectory
354 *
355 * Validate the pointers inside the code directory to make sure that
356 * all offsets and lengths are constrained within the buffer.
357 *
358 * Parameters: cd Pointer to code directory buffer
359 * length Length of buffer
360 *
361 * Returns: 0 Success
362 * EBADEXEC Invalid code signature
363 */
364
365static int
366cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
367{
d190cdc3 368 struct cs_hash const *hashtype;
39236c6e 369
0a7de745 370 if (length < sizeof(*cd)) {
39236c6e 371 return EBADEXEC;
0a7de745
A
372 }
373 if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
39236c6e 374 return EBADEXEC;
0a7de745
A
375 }
376 if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT) {
39236c6e 377 return EBADEXEC;
0a7de745 378 }
3e170ce0 379 hashtype = cs_find_md(cd->hashType);
0a7de745 380 if (hashtype == NULL) {
39236c6e 381 return EBADEXEC;
0a7de745 382 }
39236c6e 383
0a7de745 384 if (cd->hashSize != hashtype->cs_size) {
3e170ce0 385 return EBADEXEC;
0a7de745 386 }
3e170ce0 387
0a7de745 388 if (length < ntohl(cd->hashOffset)) {
39236c6e 389 return EBADEXEC;
0a7de745 390 }
39236c6e
A
391
392 /* check that nSpecialSlots fits in the buffer in front of hashOffset */
0a7de745 393 if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
39236c6e 394 return EBADEXEC;
0a7de745 395 }
39236c6e
A
396
397 /* check that codeslots fits in the buffer */
0a7de745 398 if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
39236c6e 399 return EBADEXEC;
0a7de745 400 }
39236c6e 401
0a7de745
A
402 if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
403 if (length < ntohl(cd->scatterOffset)) {
39236c6e 404 return EBADEXEC;
0a7de745 405 }
39236c6e 406
3e170ce0 407 const SC_Scatter *scatter = (const SC_Scatter *)
0a7de745 408 (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
39236c6e
A
409 uint32_t nPages = 0;
410
411 /*
412 * Check each scatter buffer, since we don't know the
413 * length of the scatter buffer array, we have to
414 * check each entry.
415 */
0a7de745 416 while (1) {
39236c6e 417 /* check that the end of each scatter buffer is within the length */
0a7de745 418 if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
39236c6e 419 return EBADEXEC;
0a7de745 420 }
39236c6e 421 uint32_t scount = ntohl(scatter->count);
0a7de745 422 if (scount == 0) {
39236c6e 423 break;
0a7de745
A
424 }
425 if (nPages + scount < nPages) {
39236c6e 426 return EBADEXEC;
0a7de745 427 }
39236c6e
A
428 nPages += scount;
429 scatter++;
430
431 /* XXX check that bases don't overlap */
432 /* XXX check that targetOffset doesn't overlap */
433 }
434#if 0 /* rdar://12579439 */
0a7de745 435 if (nPages != ntohl(cd->nCodeSlots)) {
39236c6e 436 return EBADEXEC;
0a7de745 437 }
39236c6e
A
438#endif
439 }
440
0a7de745 441 if (length < ntohl(cd->identOffset)) {
39236c6e 442 return EBADEXEC;
0a7de745 443 }
39236c6e
A
444
445 /* identifier is NUL terminated string */
446 if (cd->identOffset) {
3e170ce0 447 const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
0a7de745 448 if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
39236c6e 449 return EBADEXEC;
0a7de745 450 }
39236c6e
A
451 }
452
fe8ab488
A
453 /* team identifier is NULL terminated string */
454 if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
0a7de745 455 if (length < ntohl(cd->teamOffset)) {
fe8ab488 456 return EBADEXEC;
0a7de745 457 }
fe8ab488 458
3e170ce0 459 const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
0a7de745 460 if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
fe8ab488 461 return EBADEXEC;
0a7de745 462 }
fe8ab488
A
463 }
464
39236c6e
A
465 return 0;
466}
467
468/*
469 *
470 */
471
472static int
473cs_validate_blob(const CS_GenericBlob *blob, size_t length)
474{
0a7de745 475 if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
39236c6e 476 return EBADEXEC;
0a7de745 477 }
39236c6e
A
478 return 0;
479}
480
481/*
482 * cs_validate_csblob
483 *
484 * Validate the superblob/embedded code directory to make sure that
485 * all internal pointers are valid.
486 *
487 * Will validate both a superblob csblob and a "raw" code directory.
488 *
489 *
490 * Parameters: buffer Pointer to code signature
491 * length Length of buffer
492 * rcd returns pointer to code directory
493 *
494 * Returns: 0 Success
495 * EBADEXEC Invalid code signature
496 */
497
498static int
813fb2f6
A
499cs_validate_csblob(
500 const uint8_t *addr,
d9a64523 501 const size_t blob_size,
813fb2f6
A
502 const CS_CodeDirectory **rcd,
503 const CS_GenericBlob **rentitlements)
39236c6e 504{
813fb2f6 505 const CS_GenericBlob *blob;
39236c6e 506 int error;
d9a64523 507 size_t length;
39236c6e
A
508
509 *rcd = NULL;
39037602 510 *rentitlements = NULL;
39236c6e 511
813fb2f6 512 blob = (const CS_GenericBlob *)(const void *)addr;
813fb2f6
A
513
514 length = blob_size;
39236c6e 515 error = cs_validate_blob(blob, length);
0a7de745 516 if (error) {
39236c6e 517 return error;
0a7de745 518 }
39236c6e
A
519 length = ntohl(blob->length);
520
521 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
490019cf
A
522 const CS_SuperBlob *sb;
523 uint32_t n, count;
524 const CS_CodeDirectory *best_cd = NULL;
525 unsigned int best_rank = 0;
5ba3f43e
A
526#if PLATFORM_WatchOS
527 const CS_CodeDirectory *sha1_cd = NULL;
528#endif
39236c6e 529
0a7de745 530 if (length < sizeof(CS_SuperBlob)) {
39236c6e 531 return EBADEXEC;
0a7de745 532 }
39236c6e 533
490019cf
A
534 sb = (const CS_SuperBlob *)blob;
535 count = ntohl(sb->count);
536
39236c6e 537 /* check that the array of BlobIndex fits in the rest of the data */
0a7de745 538 if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
39236c6e 539 return EBADEXEC;
0a7de745 540 }
39236c6e
A
541
542 /* now check each BlobIndex */
543 for (n = 0; n < count; n++) {
544 const CS_BlobIndex *blobIndex = &sb->index[n];
490019cf
A
545 uint32_t type = ntohl(blobIndex->type);
546 uint32_t offset = ntohl(blobIndex->offset);
0a7de745 547 if (length < offset) {
39236c6e 548 return EBADEXEC;
0a7de745 549 }
39236c6e
A
550
551 const CS_GenericBlob *subBlob =
0a7de745 552 (const CS_GenericBlob *)(const void *)(addr + offset);
39236c6e 553
490019cf 554 size_t subLength = length - offset;
39236c6e 555
0a7de745 556 if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
39236c6e 557 return error;
0a7de745 558 }
39236c6e
A
559 subLength = ntohl(subBlob->length);
560
561 /* extra validation for CDs, that is also returned */
490019cf
A
562 if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
563 const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
0a7de745 564 if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
39236c6e 565 return error;
0a7de745 566 }
490019cf 567 unsigned int rank = hash_rank(candidate);
0a7de745 568 if (cs_debug > 3) {
490019cf 569 printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
0a7de745 570 }
490019cf
A
571 if (best_cd == NULL || rank > best_rank) {
572 best_cd = candidate;
573 best_rank = rank;
39037602 574
0a7de745 575 if (cs_debug > 2) {
39037602 576 printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
0a7de745 577 }
39037602 578 *rcd = best_cd;
490019cf
A
579 } else if (best_cd != NULL && rank == best_rank) {
580 /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
39037602
A
581 printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
582 return EBADEXEC;
583 }
5ba3f43e
A
584#if PLATFORM_WatchOS
585 if (candidate->hashType == CS_HASHTYPE_SHA1) {
586 if (sha1_cd != NULL) {
587 printf("multiple sha1 CodeDirectories in signature; rejecting\n");
588 return EBADEXEC;
589 }
590 sha1_cd = candidate;
591 }
592#endif
39037602
A
593 } else if (type == CSSLOT_ENTITLEMENTS) {
594 if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
595 return EBADEXEC;
596 }
597 if (*rentitlements != NULL) {
598 printf("multiple entitlements blobs\n");
490019cf
A
599 return EBADEXEC;
600 }
39037602 601 *rentitlements = subBlob;
39236c6e
A
602 }
603 }
604
5ba3f43e
A
605#if PLATFORM_WatchOS
606 /* To keep watchOS fast enough, we have to resort to sha1 for
607 * some code.
608 *
609 * At the time of writing this comment, known sha1 attacks are
610 * collision attacks (not preimage or second preimage
611 * attacks), which do not apply to platform binaries since
612 * they have a fixed hash in the trust cache. Given this
613 * property, we only prefer sha1 code directories for adhoc
614 * signatures, which always have to be in a trust cache to be
615 * valid (can-load-cdhash does not exist for watchOS). Those
616 * are, incidentally, also the platform binaries, for which we
617 * care about the performance hit that sha256 would bring us.
618 *
619 * Platform binaries may still contain a (not chosen) sha256
620 * code directory, which keeps software updates that switch to
621 * sha256-only small.
622 */
623
624 if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
625 if (sha1_cd->flags != (*rcd)->flags) {
626 printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
0a7de745 627 (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
5ba3f43e
A
628 *rcd = NULL;
629 return EBADEXEC;
630 }
631
632 *rcd = sha1_cd;
633 }
634#endif
39236c6e 635 } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
0a7de745 636 if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
39236c6e 637 return error;
0a7de745 638 }
39236c6e
A
639 *rcd = (const CS_CodeDirectory *)blob;
640 } else {
641 return EBADEXEC;
642 }
643
0a7de745 644 if (*rcd == NULL) {
39236c6e 645 return EBADEXEC;
0a7de745 646 }
39236c6e
A
647
648 return 0;
649}
650
651/*
652 * cs_find_blob_bytes
653 *
654 * Find a blob from the superblob/code directory. The blob must have
655 * been validated by cs_validate_csblob() before calling
3e170ce0 656 * this. Use csblob_find_blob() instead.
0a7de745 657 *
39236c6e
A
658 * Will also find a "raw" code directory if it's stored as well as
659 * searching the superblob.
660 *
661 * Parameters: buffer Pointer to code signature
662 * length Length of buffer
663 * type type of blob to find
664 * magic the magic number for that blob
665 *
666 * Returns: pointer Success
667 * NULL Buffer not found
668 */
669
3e170ce0
A
670const CS_GenericBlob *
671csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
39236c6e 672{
3e170ce0 673 const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;
39236c6e
A
674
675 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
676 const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
677 size_t n, count = ntohl(sb->count);
678
679 for (n = 0; n < count; n++) {
0a7de745 680 if (ntohl(sb->index[n].type) != type) {
39236c6e 681 continue;
0a7de745 682 }
39236c6e 683 uint32_t offset = ntohl(sb->index[n].offset);
0a7de745 684 if (length - sizeof(const CS_GenericBlob) < offset) {
39236c6e 685 return NULL;
0a7de745 686 }
3e170ce0 687 blob = (const CS_GenericBlob *)(const void *)(addr + offset);
0a7de745 688 if (ntohl(blob->magic) != magic) {
39236c6e 689 continue;
0a7de745 690 }
39236c6e
A
691 return blob;
692 }
693 } else if (type == CSSLOT_CODEDIRECTORY
0a7de745
A
694 && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
695 && magic == CSMAGIC_CODEDIRECTORY) {
39236c6e 696 return blob;
0a7de745 697 }
39236c6e
A
698 return NULL;
699}
700
701
fe8ab488 702const CS_GenericBlob *
3e170ce0 703csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
39236c6e 704{
0a7de745 705 if ((csblob->csb_flags & CS_VALID) == 0) {
39236c6e 706 return NULL;
0a7de745 707 }
3e170ce0 708 return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
39236c6e
A
709}
710
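/*
 * Illustrative sketch (not part of the original file): using
 * csblob_find_blob() to locate one specific slot in a validated signature,
 * here the embedded entitlements blob.  The helper name is hypothetical.
 */
#if 0 /* example only */
static size_t
example_entitlements_blob_size(struct cs_blob *csblob)
{
	const CS_GenericBlob *blob;

	/* NULL if the slot is absent or the signature is not CS_VALID */
	blob = csblob_find_blob(csblob, CSSLOT_ENTITLEMENTS,
	    CSMAGIC_EMBEDDED_ENTITLEMENTS);
	if (blob == NULL) {
		return 0;
	}
	/* blob->length counts the CS_GenericBlob header plus its payload */
	return (size_t)ntohl(blob->length);
}
#endif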
711static const uint8_t *
3e170ce0 712find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
39236c6e
A
713{
714 /* there is no zero special slot since that is the first code slot */
0a7de745 715 if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
39236c6e 716 return NULL;
0a7de745 717 }
39236c6e 718
0a7de745 719 return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
39236c6e
A
720}
721
3e170ce0 722static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
39236c6e 723
6d2010ae 724int
3e170ce0 725csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
6d2010ae 726{
3e170ce0 727 uint8_t computed_hash[CS_HASH_MAX_SIZE];
39236c6e
A
728 const CS_GenericBlob *entitlements;
729 const CS_CodeDirectory *code_dir;
39236c6e 730 const uint8_t *embedded_hash;
3e170ce0 731 union cs_hash_union context;
39236c6e
A
732
733 *out_start = NULL;
734 *out_length = 0;
735
0a7de745
A
736 if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
737 return EBADEXEC;
738 }
39236c6e 739
490019cf 740 code_dir = csblob->csb_cd;
39236c6e 741
39037602
A
742 if ((csblob->csb_flags & CS_VALID) == 0) {
743 entitlements = NULL;
744 } else {
745 entitlements = csblob->csb_entitlements_blob;
746 }
3e170ce0 747 embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);
39236c6e
A
748
749 if (embedded_hash == NULL) {
0a7de745 750 if (entitlements) {
39236c6e 751 return EBADEXEC;
0a7de745 752 }
39236c6e 753 return 0;
490019cf
A
754 } else if (entitlements == NULL) {
755 if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
756 return EBADEXEC;
757 } else {
758 return 0;
759 }
6d2010ae 760 }
39236c6e 761
3e170ce0
A
762 csblob->csb_hashtype->cs_init(&context);
763 csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
764 csblob->csb_hashtype->cs_final(computed_hash, &context);
765
0a7de745 766 if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
39236c6e 767 return EBADEXEC;
0a7de745 768 }
39236c6e 769
3e170ce0 770 *out_start = __DECONST(void *, entitlements);
39236c6e
A
771 *out_length = ntohl(entitlements->length);
772
773 return 0;
774}
775
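/*
 * Illustrative sketch (not part of the original file): retrieving a signed
 * binary's entitlements.  Unlike a raw csblob_find_blob() lookup,
 * csblob_get_entitlements() also checks the blob against the entitlements
 * special-slot hash of the chosen CodeDirectory; a 0 return with a NULL
 * start pointer simply means the signature carries no entitlements.  The
 * helper name is hypothetical.
 */
#if 0 /* example only */
static int
example_has_entitlements(struct cs_blob *csblob, bool *has_entitlements)
{
	void *ents = NULL;
	size_t ents_len = 0;
	int error;

	error = csblob_get_entitlements(csblob, &ents, &ents_len);
	if (error) {
		return error;   /* EBADEXEC: malformed blob or hash mismatch */
	}
	*has_entitlements = (ents != NULL && ents_len > 0);
	return 0;
}
#endif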
6d2010ae 776/*
3e170ce0
A
777 * CODESIGNING
778 * End of routines to navigate code signing data structures in the kernel.
6d2010ae
A
779 */
780
781
2d21ac55 782
1c79356b 783/*
2d21ac55 784 * ubc_init
0a7de745 785 *
2d21ac55
A
786 * Initialization of the zone for Unified Buffer Cache.
787 *
788 * Parameters: (void)
789 *
790 * Returns: (void)
791 *
792 * Implicit returns:
793 * ubc_info_zone(global) initialized for subsequent allocations
1c79356b 794 */
0b4e3aa0 795__private_extern__ void
2d21ac55 796ubc_init(void)
1c79356b 797{
0a7de745 798 int i;
1c79356b 799
0a7de745 800 i = (vm_size_t) sizeof(struct ubc_info);
2d21ac55 801
0a7de745 802 ubc_info_zone = zinit(i, 10000 * i, 8192, "ubc_info zone");
0b4c1975
A
803
804 zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
1c79356b
A
805}
806
2d21ac55 807
1c79356b 808/*
2d21ac55
A
809 * ubc_info_init
810 *
811 * Allocate and attach an empty ubc_info structure to a vnode
812 *
813 * Parameters: vp Pointer to the vnode
814 *
815 * Returns: 0 Success
816 * vnode_size:ENOMEM Not enough space
817 * vnode_size:??? Other error from vnode_getattr
818 *
1c79356b
A
819 */
820int
821ubc_info_init(struct vnode *vp)
91447636 822{
0a7de745 823 return ubc_info_init_internal(vp, 0, 0);
91447636 824}
2d21ac55
A
825
826
827/*
828 * ubc_info_init_withsize
829 *
830 * Allocate and attach a sized ubc_info structure to a vnode
831 *
832 * Parameters: vp Pointer to the vnode
833 * filesize The size of the file
834 *
835 * Returns: 0 Success
836 * vnode_size:ENOMEM Not enough space
837 * vnode_size:??? Other error from vnode_getattr
838 */
91447636
A
839int
840ubc_info_init_withsize(struct vnode *vp, off_t filesize)
841{
0a7de745 842 return ubc_info_init_internal(vp, 1, filesize);
91447636
A
843}
844
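/*
 * Illustrative sketch (not part of the original file): how a filesystem
 * might attach UBC state when it creates a regular-file vnode and already
 * knows the file's size, avoiding the vnode_size() lookup that plain
 * ubc_info_init() performs.  The helper name is hypothetical.
 */
#if 0 /* example only */
static int
example_fs_attach_ubc(vnode_t vp, off_t known_size)
{
	if (!vnode_isreg(vp) || UBCINFOEXISTS(vp)) {
		return 0;       /* not a regular file, or already initialized */
	}
	return ubc_info_init_withsize(vp, known_size);
}
#endif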
2d21ac55
A
845
846/*
847 * ubc_info_init_internal
848 *
849 * Allocate and attach a ubc_info structure to a vnode
850 *
851 * Parameters: vp Pointer to the vnode
852 * withfsize{0,1} Zero if the size should be obtained
853 * from the vnode; otherwise, use filesize
854 * filesize The size of the file, if withfsize == 1
855 *
856 * Returns: 0 Success
857 * vnode_size:ENOMEM Not enough space
858 * vnode_size:??? Other error from vnode_getattr
859 *
860 * Notes: We call a blocking zalloc(), and the zone was created as an
861 * expandable and collectable zone, so if no memory is available,
862 * it is possible for zalloc() to block indefinitely. zalloc()
863 * may also panic if the zone of zones is exhausted, since it's
864 * NOT expandable.
865 *
866 * We unconditionally call vnode_pager_setup(), even if this is
867 * a reuse of a ubc_info; in that case, we should probably assert
868 * that it does not already have a pager association, but do not.
869 *
870 * Since memory_object_create_named() can only fail from receiving
871 * an invalid pager argument, the explicit check and panic is
872 * merely precautionary.
873 */
874static int
875ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
1c79356b 876{
0a7de745 877 struct ubc_info *uip;
1c79356b 878 void * pager;
1c79356b
A
879 int error = 0;
880 kern_return_t kret;
0b4e3aa0 881 memory_object_control_t control;
1c79356b 882
91447636 883 uip = vp->v_ubcinfo;
1c79356b 884
2d21ac55
A
885 /*
886 * If there is not already a ubc_info attached to the vnode, we
887 * attach one; otherwise, we will reuse the one that's there.
888 */
91447636 889 if (uip == UBC_INFO_NULL) {
1c79356b 890 uip = (struct ubc_info *) zalloc(ubc_info_zone);
91447636
A
891 bzero((char *)uip, sizeof(struct ubc_info));
892
1c79356b 893 uip->ui_vnode = vp;
91447636 894 uip->ui_flags = UI_INITED;
1c79356b
A
895 uip->ui_ucred = NOCRED;
896 }
1c79356b
A
897 assert(uip->ui_flags != UI_NONE);
898 assert(uip->ui_vnode == vp);
899
1c79356b
A
900 /* now set this ubc_info in the vnode */
901 vp->v_ubcinfo = uip;
91447636 902
2d21ac55
A
903 /*
904 * Allocate a pager object for this vnode
905 *
906 * XXX The value of the pager parameter is currently ignored.
907 * XXX Presumably, this API changed to avoid the race between
908 * XXX setting the pager and the UI_HASPAGER flag.
909 */
1c79356b
A
910 pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
911 assert(pager);
91447636 912
2d21ac55
A
913 /*
914 * Explicitly set the pager into the ubc_info, after setting the
915 * UI_HASPAGER flag.
916 */
91447636
A
917 SET(uip->ui_flags, UI_HASPAGER);
918 uip->ui_pager = pager;
1c79356b
A
919
920 /*
91447636 921 * Note: We can not use VNOP_GETATTR() to get accurate
2d21ac55
A
922 * value of ui_size because this may be an NFS vnode, and
923 * nfs_getattr() can call vinvalbuf(); if this happens,
924 * ubc_info is not set up to deal with that event.
1c79356b
A
925 * So use bogus size.
926 */
927
1c79356b 928 /*
0b4e3aa0
A
929 * create a vnode - vm_object association
930 * memory_object_create_named() creates a "named" reference on the
931 * memory object. We hold this reference as long as the vnode is
932 * "alive." Since memory_object_create_named() took its own reference
933 * on the vnode pager we passed it, we can drop the reference
934 * vnode_pager_setup() returned here.
1c79356b 935 */
0b4e3aa0 936 kret = memory_object_create_named(pager,
0a7de745
A
937 (memory_object_size_t)uip->ui_size, &control);
938 vnode_pager_deallocate(pager);
939 if (kret != KERN_SUCCESS) {
0b4e3aa0 940 panic("ubc_info_init: memory_object_create_named returned %d", kret);
0a7de745 941 }
1c79356b 942
0b4e3aa0 943 assert(control);
0a7de745
A
944 uip->ui_control = control; /* cache the value of the mo control */
945 SET(uip->ui_flags, UI_HASOBJREF); /* with a named reference */
2d21ac55 946
91447636 947 if (withfsize == 0) {
91447636 948 /* initialize the size */
2d21ac55 949 error = vnode_size(vp, &uip->ui_size, vfs_context_current());
0a7de745 950 if (error) {
91447636 951 uip->ui_size = 0;
0a7de745 952 }
91447636
A
953 } else {
954 uip->ui_size = filesize;
955 }
0a7de745 956 vp->v_lflag |= VNAMED_UBC; /* vnode has a named ubc reference */
1c79356b 957
0a7de745 958 return error;
1c79356b
A
959}
960
2d21ac55
A
961
962/*
963 * ubc_info_free
964 *
965 * Free a ubc_info structure
966 *
967 * Parameters: uip A pointer to the ubc_info to free
968 *
969 * Returns: (void)
970 *
971 * Notes: If there is a credential that has subsequently been associated
972 * with the ubc_info via a call to ubc_setcred(), the reference
973 * to the credential is dropped.
974 *
975 * It's actually impossible for a ubc_info.ui_control to take the
976 * value MEMORY_OBJECT_CONTROL_NULL.
977 */
0b4e3aa0
A
978static void
979ubc_info_free(struct ubc_info *uip)
1c79356b 980{
0c530ab8
A
981 if (IS_VALID_CRED(uip->ui_ucred)) {
982 kauth_cred_unref(&uip->ui_ucred);
1c79356b 983 }
0b4e3aa0 984
0a7de745 985 if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
0b4e3aa0 986 memory_object_control_deallocate(uip->ui_control);
0a7de745
A
987 }
988
91447636 989 cluster_release(uip);
2d21ac55 990 ubc_cs_free(uip);
0b4e3aa0 991
2d21ac55 992 zfree(ubc_info_zone, uip);
1c79356b
A
993 return;
994}
995
2d21ac55 996
0b4e3aa0
A
997void
998ubc_info_deallocate(struct ubc_info *uip)
999{
0a7de745 1000 ubc_info_free(uip);
0b4e3aa0
A
1001}
1002
0a7de745
A
1003errno_t
1004mach_to_bsd_errno(kern_return_t mach_err)
fe8ab488
A
1005{
1006 switch (mach_err) {
1007 case KERN_SUCCESS:
1008 return 0;
1009
1010 case KERN_INVALID_ADDRESS:
1011 case KERN_INVALID_ARGUMENT:
1012 case KERN_NOT_IN_SET:
1013 case KERN_INVALID_NAME:
1014 case KERN_INVALID_TASK:
1015 case KERN_INVALID_RIGHT:
1016 case KERN_INVALID_VALUE:
1017 case KERN_INVALID_CAPABILITY:
1018 case KERN_INVALID_HOST:
1019 case KERN_MEMORY_PRESENT:
1020 case KERN_INVALID_PROCESSOR_SET:
1021 case KERN_INVALID_POLICY:
1022 case KERN_ALREADY_WAITING:
1023 case KERN_DEFAULT_SET:
1024 case KERN_EXCEPTION_PROTECTED:
1025 case KERN_INVALID_LEDGER:
1026 case KERN_INVALID_MEMORY_CONTROL:
1027 case KERN_INVALID_SECURITY:
1028 case KERN_NOT_DEPRESSED:
1029 case KERN_LOCK_OWNED:
1030 case KERN_LOCK_OWNED_SELF:
1031 return EINVAL;
1032
1033 case KERN_PROTECTION_FAILURE:
1034 case KERN_NOT_RECEIVER:
1035 case KERN_NO_ACCESS:
1036 case KERN_POLICY_STATIC:
1037 return EACCES;
1038
1039 case KERN_NO_SPACE:
1040 case KERN_RESOURCE_SHORTAGE:
1041 case KERN_UREFS_OVERFLOW:
1042 case KERN_INVALID_OBJECT:
1043 return ENOMEM;
1044
1045 case KERN_FAILURE:
1046 return EIO;
1047
1048 case KERN_MEMORY_FAILURE:
1049 case KERN_POLICY_LIMIT:
1050 case KERN_CODESIGN_ERROR:
1051 return EPERM;
1052
1053 case KERN_MEMORY_ERROR:
1054 return EBUSY;
1055
1056 case KERN_ALREADY_IN_SET:
1057 case KERN_NAME_EXISTS:
1058 case KERN_RIGHT_EXISTS:
1059 return EEXIST;
1060
1061 case KERN_ABORTED:
1062 return EINTR;
1063
1064 case KERN_TERMINATED:
1065 case KERN_LOCK_SET_DESTROYED:
1066 case KERN_LOCK_UNSTABLE:
1067 case KERN_SEMAPHORE_DESTROYED:
1068 return ENOENT;
1069
1070 case KERN_RPC_SERVER_TERMINATED:
1071 return ECONNRESET;
1072
1073 case KERN_NOT_SUPPORTED:
1074 return ENOTSUP;
1075
1076 case KERN_NODE_DOWN:
1077 return ENETDOWN;
1078
1079 case KERN_NOT_WAITING:
1080 return ENOENT;
1081
1082 case KERN_OPERATION_TIMED_OUT:
1083 return ETIMEDOUT;
1084
1085 default:
1086 return EIO;
1087 }
1088}
2d21ac55 1089
1c79356b 1090/*
fe8ab488 1091 * ubc_setsize_ex
2d21ac55 1092 *
fe8ab488 1093 * Tell the VM that the size of the file represented by the vnode has
2d21ac55
A
1094 * changed
1095 *
fe8ab488
A
1096 * Parameters: vp The vp whose backing file size is
1097 * being changed
1098 * nsize The new size of the backing file
1099 * opts Options
1100 *
1101 * Returns: EINVAL for new size < 0
1102 * ENOENT if no UBC info exists
1103 * EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1104 * Other errors (mapped to errno_t) returned by VM functions
1105 *
1106 * Notes: This function will indicate success if the new size is the
1107 * same or larger than the old size (in this case, the
1108 * remainder of the file will require modification or use of
1109 * an existing upl to access successfully).
1110 *
1111 * This function will fail if the new file size is smaller,
1112 * and the memory region being invalidated was unable to
1113 * actually be invalidated and/or the last page could not be
1114 * flushed, if the new size is not aligned to a page
1115 * boundary. This is usually indicative of an I/O error.
1c79356b 1116 */
0a7de745
A
1117errno_t
1118ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
1c79356b 1119{
0a7de745 1120 off_t osize; /* ui_size before change */
1c79356b
A
1121 off_t lastpg, olastpgend, lastoff;
1122 struct ubc_info *uip;
0b4e3aa0 1123 memory_object_control_t control;
2d21ac55 1124 kern_return_t kret = KERN_SUCCESS;
1c79356b 1125
0a7de745 1126 if (nsize < (off_t)0) {
fe8ab488 1127 return EINVAL;
0a7de745 1128 }
1c79356b 1129
0a7de745 1130 if (!UBCINFOEXISTS(vp)) {
fe8ab488 1131 return ENOENT;
0a7de745 1132 }
1c79356b
A
1133
1134 uip = vp->v_ubcinfo;
2d21ac55 1135 osize = uip->ui_size;
fe8ab488 1136
0a7de745 1137 if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
fe8ab488 1138 return EAGAIN;
0a7de745 1139 }
fe8ab488 1140
2d21ac55
A
1141 /*
1142 * Update the size before flushing the VM
1143 */
1c79356b
A
1144 uip->ui_size = nsize;
1145
0a7de745 1146 if (nsize >= osize) { /* Nothing more to do */
6d2010ae
A
1147 if (nsize > osize) {
1148 lock_vnode_and_post(vp, NOTE_EXTEND);
1149 }
1150
fe8ab488 1151 return 0;
b0d623f7 1152 }
1c79356b
A
1153
1154 /*
1155 * When the file shrinks, invalidate the pages beyond the
1156 * new size. Also get rid of garbage beyond nsize on the
2d21ac55
A
1157 * last page. The ui_size already has the nsize, so any
1158 * subsequent page-in will zero-fill the tail properly
1c79356b 1159 */
1c79356b
A
1160 lastpg = trunc_page_64(nsize);
1161 olastpgend = round_page_64(osize);
0b4e3aa0
A
1162 control = uip->ui_control;
1163 assert(control);
1c79356b
A
1164 lastoff = (nsize & PAGE_MASK_64);
1165
2d21ac55 1166 if (lastoff) {
0a7de745
A
1167 upl_t upl;
1168 upl_page_info_t *pl;
2d21ac55 1169
fe8ab488 1170 /*
2d21ac55 1171 * new EOF ends up in the middle of a page
fe8ab488 1172 * zero the tail of this page if it's currently
2d21ac55
A
1173 * present in the cache
1174 */
b226f5e5 1175 kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);
fe8ab488 1176
0a7de745
A
1177 if (kret != KERN_SUCCESS) {
1178 panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);
1179 }
2d21ac55 1180
0a7de745
A
1181 if (upl_valid_page(pl, 0)) {
1182 cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
1183 }
2d21ac55
A
1184
1185 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
1c79356b 1186
2d21ac55
A
1187 lastpg += PAGE_SIZE_64;
1188 }
1189 if (olastpgend > lastpg) {
0a7de745 1190 int flags;
b0d623f7 1191
0a7de745 1192 if (lastpg == 0) {
b0d623f7 1193 flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
0a7de745 1194 } else {
b0d623f7 1195 flags = MEMORY_OBJECT_DATA_FLUSH;
0a7de745 1196 }
fe8ab488 1197 /*
2d21ac55
A
1198 * invalidate the pages beyond the new EOF page
1199 *
1200 */
fe8ab488 1201 kret = memory_object_lock_request(control,
0a7de745
A
1202 (memory_object_offset_t)lastpg,
1203 (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
1204 MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
1205 if (kret != KERN_SUCCESS) {
1206 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
1207 }
2d21ac55 1208 }
fe8ab488 1209 return mach_to_bsd_errno(kret);
1c79356b
A
1210}
1211
fe8ab488 1212// Returns true for success
0a7de745
A
1213int
1214ubc_setsize(vnode_t vp, off_t nsize)
fe8ab488
A
1215{
1216 return ubc_setsize_ex(vp, nsize, 0) == 0;
1217}
2d21ac55 1218
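/*
 * Illustrative sketch (not part of the original file): a truncate path
 * shrinking the page cache with ubc_setsize_ex().  Passing
 * UBC_SETSIZE_NO_FS_REENTRY asks for EAGAIN instead of shrinking when the
 * operation could call back into the filesystem, so the caller can retry
 * once that is safe.  The helper name is hypothetical.
 */
#if 0 /* example only */
static errno_t
example_truncate_pagecache(vnode_t vp, off_t new_size)
{
	errno_t err;

	err = ubc_setsize_ex(vp, new_size, UBC_SETSIZE_NO_FS_REENTRY);
	if (err == EAGAIN) {
		/* shrink deferred; retry later without the no-reentry option */
		return err;
	}
	/* 0 on success; EINVAL, ENOENT, or a mapped Mach error otherwise */
	return err;
}
#endif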
1c79356b 1219/*
2d21ac55
A
1220 * ubc_getsize
1221 *
1222 * Get the size of the file associated with the specified vnode
1223 *
1224 * Parameters: vp The vnode whose size is of interest
1225 *
1226 * Returns: 0 There is no ubc_info associated with
1227 * this vnode, or the size is zero
1228 * !0 The size of the file
1229 *
1230 * Notes: Using this routine, it is not possible for a caller to
1231 * successfully distinguish between a vnode associated with a zero
1232 * length file, and a vnode with no associated ubc_info. The
1233 * caller therefore needs to not care, or needs to ensure that
1234 * they have previously successfully called ubc_info_init() or
1235 * ubc_info_init_withsize().
1c79356b
A
1236 */
1237off_t
1238ubc_getsize(struct vnode *vp)
1239{
91447636 1240 /* people depend on the side effect of this working this way
0a7de745 1241 * as they call this for directories
1c79356b 1242 */
0a7de745
A
1243 if (!UBCINFOEXISTS(vp)) {
1244 return (off_t)0;
1245 }
1246 return vp->v_ubcinfo->ui_size;
1c79356b
A
1247}
1248
2d21ac55 1249
1c79356b 1250/*
2d21ac55
A
1251 * ubc_umount
1252 *
fe8ab488 1253 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
2d21ac55
A
1254 * mount point
1255 *
1256 * Parameters: mp The mount point
1257 *
1258 * Returns: 0 Success
1259 *
1260 * Notes: There is no failure indication for this function.
1261 *
1262 * This function is used in the unmount path; since it may block
1263 * I/O indefinitely, it should not be used in the forced unmount
1264 * path, since a device unavailability could also block that
1265 * indefinitely.
1266 *
1267 * Because there is no device ejection interlock on USB, FireWire,
1268 * or similar devices, it's possible that an ejection that begins
1269 * subsequent to the vnode_iterate() completing, either on one of
1270 * those devices, or a network mount for which the server quits
1271 * responding, etc., may cause the caller to block indefinitely.
1c79356b 1272 */
0b4e3aa0 1273__private_extern__ int
1c79356b
A
1274ubc_umount(struct mount *mp)
1275{
91447636 1276 vnode_iterate(mp, 0, ubc_umcallback, 0);
0a7de745 1277 return 0;
1c79356b
A
1278}
1279
2d21ac55
A
1280
1281/*
1282 * ubc_umcallback
1283 *
1284 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1285 * and vnode_iterate() for details of implementation.
1286 */
91447636
A
1287static int
1288ubc_umcallback(vnode_t vp, __unused void * args)
1c79356b 1289{
91447636 1290 if (UBCINFOEXISTS(vp)) {
91447636 1291 (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1c79356b 1292 }
0a7de745 1293 return VNODE_RETURNED;
1c79356b
A
1294}
1295
91447636 1296
2d21ac55
A
1297/*
1298 * ubc_getcred
1299 *
1300 * Get the credentials currently active for the ubc_info associated with the
1301 * vnode.
1302 *
1303 * Parameters: vp The vnode whose ubc_info credentials
1304 * are to be retrieved
1305 *
1306 * Returns: !NOCRED The credentials
1307 * NOCRED If there is no ubc_info for the vnode,
1308 * or if there is one, but it has not had
1309 * any credentials associated with it via
1310 * a call to ubc_setcred()
1311 */
91447636 1312kauth_cred_t
1c79356b
A
1313ubc_getcred(struct vnode *vp)
1314{
0a7de745
A
1315 if (UBCINFOEXISTS(vp)) {
1316 return vp->v_ubcinfo->ui_ucred;
1317 }
1c79356b 1318
0a7de745 1319 return NOCRED;
1c79356b
A
1320}
1321
2d21ac55
A
1322
1323/*
1324 * ubc_setthreadcred
1325 *
1326 * If they are not already set, set the credentials of the ubc_info structure
1327 * associated with the vnode to those of the supplied thread; otherwise leave
1328 * them alone.
1329 *
1330 * Parameters: vp The vnode whose ubc_info creds are to
1331 * be set
1332 * p The process whose credentials are to
1333 * be used, if not running on an assumed
1334 * credential
1335 * thread The thread whose credentials are to
1336 * be used
1337 *
1338 * Returns: 1 This vnode has no associated ubc_info
1339 * 0 Success
1340 *
1341 * Notes: This function takes a proc parameter to account for bootstrap
1342 * issues where a task or thread may call this routine, either
1343 * before credentials have been initialized by bsd_init(), or if
1344 * there is no BSD info associated with a mach thread yet. This
1345 * is known to happen in both the initial swap and memory mapping
1346 * calls.
1347 *
1348 * This function is generally used only in the following cases:
1349 *
1350 * o a memory mapped file via the mmap() system call
2d21ac55
A
1351 * o a swap store backing file
1352 * o subsequent to a successful write via vn_write()
1353 *
1354 * The information is then used by the NFS client in order to
1355 * cons up a wire message in either the page-in or page-out path.
1356 *
1357 * There are two potential problems with the use of this API:
1358 *
1359 * o Because the write path only set it on a successful
1360 * write, there is a race window between setting the
1361 * credential and its use to evict the pages to the
1362 * remote file server
1363 *
1364 * o Because a page-in may occur prior to a write, the
1365 * credential may not be set at this time, if the page-in
fe8ab488 1366 * is not the result of a mapping established via mmap().
2d21ac55
A
1367 *
1368 * In both these cases, this will be triggered from the paging
1369 * path, which will instead use the credential of the current
1370 * process, which in this case is either the dynamic_pager or
1371 * the kernel task, both of which utilize "root" credentials.
1372 *
1373 * This may potentially permit operations to occur which should
1374 * be denied, or it may cause to be denied operations which
1375 * should be permitted, depending on the configuration of the NFS
1376 * server.
1377 */
13fec989 1378int
2d21ac55 1379ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
13fec989
A
1380{
1381 struct ubc_info *uip;
1382 kauth_cred_t credp;
2d21ac55 1383 struct uthread *uthread = get_bsdthread_info(thread);
13fec989 1384
0a7de745
A
1385 if (!UBCINFOEXISTS(vp)) {
1386 return 1;
1387 }
13fec989
A
1388
1389 vnode_lock(vp);
1390
1391 uip = vp->v_ubcinfo;
1392 credp = uip->ui_ucred;
1393
0c530ab8 1394 if (!IS_VALID_CRED(credp)) {
13fec989
A
1395 /* use per-thread cred, if assumed identity, else proc cred */
1396 if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
1397 uip->ui_ucred = kauth_cred_proc_ref(p);
1398 } else {
1399 uip->ui_ucred = uthread->uu_ucred;
1400 kauth_cred_ref(uip->ui_ucred);
1401 }
0a7de745 1402 }
13fec989
A
1403 vnode_unlock(vp);
1404
0a7de745 1405 return 0;
13fec989
A
1406}
1407
2d21ac55 1408
1c79356b 1409/*
2d21ac55
A
1410 * ubc_setcred
1411 *
1412 * If they are not already set, set the credentials of the ubc_info structure
1413 * associated with the vnode to those of the process; otherwise leave them
1414 * alone.
1415 *
1416 * Parameters: vp The vnode whose ubc_info creds are to
1417 * be set
1418 * p The process whose credentials are to
1419 * be used
1420 *
1421 * Returns: 0 This vnode has no associated ubc_info
1422 * 1 Success
1423 *
1424 * Notes: The return values for this function are inverted from nearly
1425 * all other uses in the kernel.
1426 *
1427 * See also ubc_setthreadcred(), above.
1428 *
1429 * This function is considered deprecated, and generally should
1430 * not be used, as it is incompatible with per-thread credentials;
1431 * it exists for legacy KPI reasons.
1432 *
0a7de745 1433 * DEPRECATION: ubc_setcred() is being deprecated. Please use
2d21ac55 1434 * ubc_setthreadcred() instead.
1c79356b 1435 */
1c79356b 1436int
2d21ac55 1437ubc_setcred(struct vnode *vp, proc_t p)
1c79356b
A
1438{
1439 struct ubc_info *uip;
91447636 1440 kauth_cred_t credp;
1c79356b 1441
2d21ac55 1442 /* If there is no ubc_info, deny the operation */
0a7de745
A
1443 if (!UBCINFOEXISTS(vp)) {
1444 return 0;
1445 }
1c79356b 1446
2d21ac55
A
1447 /*
1448 * Check to see if there is already a credential reference in the
1449 * ubc_info; if there is not, take one on the supplied credential.
1450 */
91447636 1451 vnode_lock(vp);
91447636 1452 uip = vp->v_ubcinfo;
1c79356b 1453 credp = uip->ui_ucred;
0c530ab8 1454 if (!IS_VALID_CRED(credp)) {
91447636 1455 uip->ui_ucred = kauth_cred_proc_ref(p);
0a7de745 1456 }
91447636 1457 vnode_unlock(vp);
1c79356b 1458
0a7de745 1459 return 1;
1c79356b
A
1460}
1461
2d21ac55
A
1462/*
1463 * ubc_getpager
1464 *
1465 * Get the pager associated with the ubc_info associated with the vnode.
1466 *
1467 * Parameters: vp The vnode to obtain the pager from
1468 *
1469 * Returns: !VNODE_PAGER_NULL The memory_object_t for the pager
1470 * VNODE_PAGER_NULL There is no ubc_info for this vnode
1471 *
1472 * Notes: For each vnode that has a ubc_info associated with it, that
1473 * ubc_info SHALL have a pager associated with it, so in the
1474 * normal case, it's impossible to return VNODE_PAGER_NULL for
1475 * a vnode with an associated ubc_info.
1476 */
0b4e3aa0 1477__private_extern__ memory_object_t
1c79356b
A
1478ubc_getpager(struct vnode *vp)
1479{
0a7de745
A
1480 if (UBCINFOEXISTS(vp)) {
1481 return vp->v_ubcinfo->ui_pager;
1482 }
1c79356b 1483
0a7de745 1484 return 0;
1c79356b
A
1485}
1486
2d21ac55 1487
1c79356b 1488/*
2d21ac55
A
1489 * ubc_getobject
1490 *
1491 * Get the memory object control associated with the ubc_info associated with
1492 * the vnode
1493 *
1494 * Parameters: vp The vnode to obtain the memory object
1495 * from
1496 * flags DEPRECATED
1497 *
1498 * Returns: !MEMORY_OBJECT_CONTROL_NULL
1499 * MEMORY_OBJECT_CONTROL_NULL
1500 *
1501 * Notes: Historically, if the flags were not "do not reactivate", this
1502 * function would look up the memory object using the pager if
1503 * it did not exist (this could be the case if the vnode had
1504 * been previously reactivated). The flags would also permit a
1505 * hold to be requested, which would have created an object
1506 * reference, if one had not already existed. This usage is
1507 * deprecated, as it would permit a race between finding and
1508 * taking the reference vs. a single reference being dropped in
1509 * another thread.
1c79356b 1510 */
0b4e3aa0 1511memory_object_control_t
91447636 1512ubc_getobject(struct vnode *vp, __unused int flags)
1c79356b 1513{
0a7de745
A
1514 if (UBCINFOEXISTS(vp)) {
1515 return vp->v_ubcinfo->ui_control;
1516 }
1c79356b 1517
0a7de745 1518 return MEMORY_OBJECT_CONTROL_NULL;
1c79356b
A
1519}
1520
2d21ac55
A
1521/*
1522 * ubc_blktooff
1523 *
1524 * Convert a given block number to a memory backing object (file) offset for a
1525 * given vnode
1526 *
1527 * Parameters: vp The vnode in which the block is located
1528 * blkno The block number to convert
1529 *
1530 * Returns: !-1 The offset into the backing object
1531 * -1 There is no ubc_info associated with
1532 * the vnode
1533 * -1 An error occurred in the underlying VFS
1534 * while translating the block to an
1535 * offset; the most likely cause is that
1536 * the caller specified a block past the
1537 * end of the file, but this could also be
1538 * any other error from VNOP_BLKTOOFF().
1539 *
1540 * Note: Representing the error in band loses some information, but does
1541 * not occlude a valid offset, since an off_t of -1 is normally
1542 * used to represent EOF. If we had a more reliable constant in
1543 * our header files for it (i.e. explicitly cast to an off_t), we
1544 * would use it here instead.
1545 */
1c79356b 1546off_t
91447636 1547ubc_blktooff(vnode_t vp, daddr64_t blkno)
1c79356b 1548{
2d21ac55 1549 off_t file_offset = -1;
1c79356b
A
1550 int error;
1551
2d21ac55
A
1552 if (UBCINFOEXISTS(vp)) {
1553 error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
0a7de745 1554 if (error) {
2d21ac55 1555 file_offset = -1;
0a7de745 1556 }
2d21ac55 1557 }
1c79356b 1558
0a7de745 1559 return file_offset;
1c79356b 1560}
0b4e3aa0 1561
2d21ac55
A
1562
1563/*
1564 * ubc_offtoblk
1565 *
1566 * Convert a given offset in a memory backing object into a block number for a
1567 * given vnode
1568 *
1569 * Parameters: vp The vnode in which the offset is
1570 * located
1571 * offset The offset into the backing object
1572 *
1573 * Returns: !-1 The returned block number
1574 * -1 There is no ubc_info associated with
1575 * the vnode
1576 * -1 An error occurred in the underlying VFS
1577 * while translating the block to an
1578 * offset; the most likely cause is that
1579 * the caller specified a block past the
1580 * end of the file, but this could also be
1581 * any other error from VNOP_OFFTOBLK().
1582 *
1583 * Note: Representing the error in band loses some information, but does
1584 * not occlude a valid block number, since block numbers exceed
1585 * the valid range for offsets, due to their relative sizes. If
1586 * we had a more reliable constant than -1 in our header files
1587 * for it (i.e. explicitly cast to an daddr64_t), we would use it
1588 * here instead.
1589 */
91447636
A
1590daddr64_t
1591ubc_offtoblk(vnode_t vp, off_t offset)
1c79356b 1592{
2d21ac55 1593 daddr64_t blkno = -1;
0b4e3aa0 1594 int error = 0;
1c79356b 1595
2d21ac55
A
1596 if (UBCINFOEXISTS(vp)) {
1597 error = VNOP_OFFTOBLK(vp, offset, &blkno);
0a7de745 1598 if (error) {
2d21ac55 1599 blkno = -1;
0a7de745 1600 }
2d21ac55 1601 }
1c79356b 1602
0a7de745 1603 return blkno;
1c79356b
A
1604}
1605
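/*
 * Illustrative sketch (not part of the original file): translating a
 * logical block number to a file offset with ubc_blktooff().  Failure is
 * reported in band as -1, so callers must treat that value as "no
 * translation" rather than as a real offset.  The helper name and errno
 * choice are hypothetical.
 */
#if 0 /* example only */
static int
example_block_to_offset(vnode_t vp, daddr64_t blkno, off_t *offsetp)
{
	off_t file_offset = ubc_blktooff(vp, blkno);

	if (file_offset == -1) {
		/* no ubc_info, or VNOP_BLKTOOFF() failed (e.g. block past EOF) */
		return ERANGE;
	}
	*offsetp = file_offset;
	return 0;
}
#endif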
2d21ac55
A
1606
1607/*
1608 * ubc_pages_resident
1609 *
1610 * Determine whether or not a given vnode has pages resident via the memory
1611 * object control associated with the ubc_info associated with the vnode
1612 *
1613 * Parameters: vp The vnode we want to know about
1614 *
1615 * Returns: 1 Yes
1616 * 0 No
1617 */
1c79356b 1618int
91447636 1619ubc_pages_resident(vnode_t vp)
1c79356b 1620{
0a7de745
A
1621 kern_return_t kret;
1622 boolean_t has_pages_resident;
1623
1624 if (!UBCINFOEXISTS(vp)) {
1625 return 0;
1626 }
1627
2d21ac55
A
1628 /*
1629 * The following call may fail if an invalid ui_control is specified,
1630 * or if there is no VM object associated with the control object. In
1631 * either case, reacting to it as if there were no pages resident will
1632 * result in correct behavior.
1633 */
91447636 1634 kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
0a7de745
A
1635
1636 if (kret != KERN_SUCCESS) {
1637 return 0;
1638 }
1639
1640 if (has_pages_resident == TRUE) {
1641 return 1;
1642 }
1643
1644 return 0;
91447636 1645}
1c79356b 1646
0b4e3aa0 1647/*
2d21ac55
A
1648 * ubc_msync
1649 *
1650 * Clean and/or invalidate a range in the memory object that backs this vnode
1651 *
1652 * Parameters: vp The vnode whose associated ubc_info's
1653 * associated memory object is to have a
1654 * range invalidated within it
1655 * beg_off The start of the range, as an offset
1656 * end_off The end of the range, as an offset
1657 * resid_off The address of an off_t supplied by the
1658 * caller; may be set to NULL to ignore
1659 * flags See ubc_msync_internal()
1660 *
1661 * Returns: 0 Success
1662 * !0 Failure; an errno is returned
1663 *
1664 * Implicit Returns:
1665 * *resid_off, modified If non-NULL, the contents are ALWAYS
1666 * modified; they are initialized to the
1667 * beg_off, and in case of an I/O error,
1668 * the difference between beg_off and the
1669 * current value will reflect what was
1670 * able to be written before the error
1671 * occurred. If no error is returned, the
1672 * value of the resid_off is undefined; do
1673 * NOT use it in place of end_off if you
1674 * intend to increment from the end of the
1675 * last call and call iteratively.
1676 *
1677 * Notes: see ubc_msync_internal() for more detailed information.
1678 *
0b4e3aa0 1679 */
91447636
A
1680errno_t
1681ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
0b4e3aa0 1682{
0a7de745 1683 int retval;
91447636 1684 int io_errno = 0;
0b4e3aa0 1685
0a7de745
A
1686 if (resid_off) {
1687 *resid_off = beg_off;
1688 }
1689
1690 retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
0b4e3aa0 1691
0a7de745
A
1692 if (retval == 0 && io_errno == 0) {
1693 return EINVAL;
1694 }
1695 return io_errno;
91447636 1696}
0b4e3aa0 1697
1c79356b 1698
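/*
 * Illustrative sketch (not part of the original file): pushing every dirty
 * page of a file to its pager and waiting for the I/O to complete, similar
 * to what the unmount path does with UBC_PUSHALL.  See the
 * ubc_msync_internal() notes below for the exact flag semantics; UBC_SYNC
 * is only meaningful combined with a push/invalidate flag.  The helper name
 * is hypothetical.
 */
#if 0 /* example only */
static errno_t
example_flush_whole_file(vnode_t vp)
{
	off_t resid = 0;

	/* returns 0 on success, otherwise an errno from the failed I/O */
	return ubc_msync(vp, (off_t)0, ubc_getsize(vp), &resid,
	           UBC_PUSHDIRTY | UBC_SYNC);
}
#endif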
1c79356b 1699/*
fe8ab488
A
1700 * ubc_msync_internal
1701 *
2d21ac55
A
1702 * Clean and/or invalidate a range in the memory object that backs this vnode
1703 *
1704 * Parameters: vp The vnode whose associated ubc_info's
1705 * associated memory object is to have a
1706 * range invalidated within it
1707 * beg_off The start of the range, as an offset
1708 * end_off The end of the range, as an offset
1709 * resid_off The address of an off_t supplied by the
1710 * caller; may be set to NULL to ignore
1711 * flags MUST contain at least one of the flags
1712 * UBC_INVALIDATE, UBC_PUSHDIRTY, or
1713 * UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1714 * UBC_SYNC may also be specified to cause
1715 * this function to block until the
1716 * operation is complete. The behavior
1717 * of UBC_SYNC is otherwise undefined.
1718 * io_errno The address of an int to contain the
1719 * errno from a failed I/O operation, if
1720 * one occurs; may be set to NULL to
1721 * ignore
1722 *
1723 * Returns: 1 Success
1724 * 0 Failure
1725 *
1726 * Implicit Returns:
1727 * *resid_off, modified The contents of this offset MAY be
1728 * modified; in case of an I/O error, the
1729 * difference between beg_off and the
1730 * current value will reflect what was
1731 * able to be written before the error
1732 * occurred.
1733 * *io_errno, modified The contents of this offset are set to
1734 * an errno, if an error occurs; if the
1735 * caller supplies an io_errno parameter,
1736 * they should be careful to initialize it
1737 * to 0 before calling this function to
1738 * enable them to distinguish an error
1739 * with a valid *resid_off from an invalid
1740 * one, and to avoid potentially falsely
1741 * reporting an error, depending on use.
1742 *
1743 * Notes: If there is no ubc_info associated with the vnode supplied,
1744 * this function immediately returns success.
1745 *
1746 * If the value of end_off is less than or equal to beg_off, this
1747 * function immediately returns success; that is, end_off is NOT
1748 * inclusive.
1749 *
1750 * IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1751 * UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1752 * attempt to block on in-progress I/O by calling this function
1753 * with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1754 * in order to block pending on the I/O already in progress.
1755 *
1756 * The start offset is truncated to the page boundary and the
1757 * size is adjusted to include the last page in the range; that
1758 * is, end_off on exactly a page boundary will not change if it
1759 * is rounded, and the range of bytes written will be from the
1760 * truncated beg_off to the rounded (end_off - 1).
1c79356b 1761 */
91447636
A
1762static int
1763ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
1c79356b 1764{
0a7de745
A
1765 memory_object_size_t tsize;
1766 kern_return_t kret;
91447636
A
1767 int request_flags = 0;
1768 int flush_flags = MEMORY_OBJECT_RETURN_NONE;
0a7de745
A
1769
1770 if (!UBCINFOEXISTS(vp)) {
1771 return 0;
1772 }
1773 if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
1774 return 0;
1775 }
1776 if (end_off <= beg_off) {
1777 return 1;
1778 }
1779
1780 if (flags & UBC_INVALIDATE) {
1781 /*
91447636
A
1782 * discard the resident pages
1783 */
1784 request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
0a7de745 1785 }
1c79356b 1786
0a7de745
A
1787 if (flags & UBC_SYNC) {
1788 /*
91447636 1789 * wait for all the I/O to complete before returning
55e303ae 1790 */
0a7de745
A
1791 request_flags |= MEMORY_OBJECT_IO_SYNC;
1792 }
55e303ae 1793
0a7de745
A
1794 if (flags & UBC_PUSHDIRTY) {
1795 /*
91447636
A
1796 * we only return the dirty pages in the range
1797 */
0a7de745
A
1798 flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
1799 }
0b4e3aa0 1800
0a7de745
A
1801 if (flags & UBC_PUSHALL) {
1802 /*
2d21ac55
A
1803 * then return all the interesting pages in the range (both
1804 * dirty and precious) to the pager
91447636 1805 */
0a7de745
A
1806 flush_flags = MEMORY_OBJECT_RETURN_ALL;
1807 }
0b4e3aa0 1808
91447636
A
1809 beg_off = trunc_page_64(beg_off);
1810 end_off = round_page_64(end_off);
1811 tsize = (memory_object_size_t)end_off - beg_off;
b4c24cb9 1812
91447636
A
1813 /* flush and/or invalidate pages in the range requested */
1814 kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
0a7de745
A
1815 beg_off, tsize,
1816 (memory_object_offset_t *)resid_off,
1817 io_errno, flush_flags, request_flags,
1818 VM_PROT_NO_CHANGE);
1819
1820 return (kret == KERN_SUCCESS) ? 1 : 0;
1c79356b
A
1821}
1822
1c79356b
A
1823
1824/*
fe8ab488 1825 * ubc_map
2d21ac55
A
1826 *
1827 * Explicitly map a vnode that has an associated ubc_info, and add a reference
1828 * to it for the ubc system, if there isn't one already, so it will not be
1829 * recycled while it's in use, and set flags on the ubc_info to indicate that
1830 * we have done this
1831 *
1832 * Parameters: vp The vnode to map
1833 * flags The mapping flags for the vnode; this
1834 * will be a combination of one or more of
1835 * PROT_READ, PROT_WRITE, and PROT_EXEC
1836 *
1837 * Returns: 0 Success
1838 * EPERM Permission was denied
1839 *
1840 * Notes: An I/O reference on the vnode must already be held on entry
1841 *
1842 * If there is no ubc_info associated with the vnode, this function
1843 * will return success.
1844 *
1845 * If a permission error occurs, this function will return
1846 * failure; all other failures will cause this function to return
1847 * success.
1848 *
1849 * IMPORTANT: This is an internal use function, and its symbols
1850 * are not exported, hence its error checking is not very robust.
1851 * It is primarily used by:
1852 *
1853 * o mmap(), when mapping a file
2d21ac55
A
1854 * o When mapping a shared file (a shared library in the
1855 * shared segment region)
1856 * o When loading a program image during the exec process
1857 *
1858 * ...all of these uses ignore the return code, and any fault that
1859 * results later because of a failure is handled in the fix-up path
1860 * of the fault handler. The interface exists primarily as a
1861 * performance hint.
1862 *
1863 * Given that third party implementations of the types of interfaces
1864 * that would use this function, such as alternative executable
1865 * formats, etc., are unsupported, this function is not exported
1866 * for general use.
1867 *
1868 * The extra reference is held until the VM system unmaps the
1869 * vnode from its own context to maintain a vnode reference in
1870 * cases like open()/mmap()/close(), which leave the backing
1871 * object referenced by a mapped memory region in a process
1872 * address space.
1c79356b 1873 */
91447636
A
1874__private_extern__ int
1875ubc_map(vnode_t vp, int flags)
1c79356b
A
1876{
1877 struct ubc_info *uip;
91447636
A
1878 int error = 0;
1879 int need_ref = 0;
2d21ac55 1880 int need_wakeup = 0;
1c79356b 1881
91447636 1882 if (UBCINFOEXISTS(vp)) {
2d21ac55
A
1883 vnode_lock(vp);
1884 uip = vp->v_ubcinfo;
1885
1886 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
1887 SET(uip->ui_flags, UI_MAPWAITING);
1888 (void) msleep(&uip->ui_flags, &vp->v_lock,
0a7de745 1889 PRIBIO, "ubc_map", NULL);
2d21ac55
A
1890 }
1891 SET(uip->ui_flags, UI_MAPBUSY);
1892 vnode_unlock(vp);
1893
1894 error = VNOP_MMAP(vp, flags, vfs_context_current());
1c79356b 1895
39037602
A
1896 /*
1897 * rdar://problem/22587101 required that we stop propagating
0a7de745 1898 * EPERM up the stack. Otherwise, we would have to funnel up
39037602 1899 * the error at all the call sites for memory_object_map().
0a7de745 1900 * The risk is in having to undo the map/object/entry state at
39037602
A
1901 * all these call sites. It would also affect more than just mmap()
1902 * e.g. vm_remap().
1903 *
1904 * if (error != EPERM)
0a7de745 1905 * error = 0;
39037602
A
1906 */
1907
1908 error = 0;
1c79356b 1909
2d21ac55 1910 vnode_lock_spin(vp);
1c79356b 1911
2d21ac55 1912 if (error == 0) {
0a7de745
A
1913 if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
1914 need_ref = 1;
1915 }
91447636 1916 SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
22ba694c
A
1917 if (flags & PROT_WRITE) {
1918 SET(uip->ui_flags, UI_MAPPEDWRITE);
1919 }
2d21ac55
A
1920 }
1921 CLR(uip->ui_flags, UI_MAPBUSY);
55e303ae 1922
2d21ac55
A
1923 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
1924 CLR(uip->ui_flags, UI_MAPWAITING);
1925 need_wakeup = 1;
55e303ae 1926 }
2d21ac55 1927 vnode_unlock(vp);
b4c24cb9 1928
0a7de745 1929 if (need_wakeup) {
2d21ac55 1930 wakeup(&uip->ui_flags);
0a7de745 1931 }
2d21ac55 1932
39037602
A
1933 if (need_ref) {
1934 /*
1935 * Make sure we get a ref as we can't unwind from here
1936 */
0a7de745 1937 if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
39037602 1938 panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__);
0a7de745 1939 }
39037602 1940 }
2d21ac55 1941 }
0a7de745 1942 return error;
0b4e3aa0
A
1943}
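/*
 * Illustrative sketch, not part of the original file: how a mapping
 * path might issue the ubc_map() performance hint described above.
 * The helper and its arguments are hypothetical; as the block comment
 * notes, real callers ignore the return value and let the fault path
 * recover from any failure.
 */
#if 0
static void
example_hint_mapping(vnode_t vp, int prot)
{
	/*
	 * Tell the UBC layer that vp is being mapped with these
	 * protections so it takes its extra vnode reference and sets
	 * UI_ISMAPPED (and UI_MAPPEDWRITE for PROT_WRITE).
	 */
	(void) ubc_map(vp, prot & (PROT_READ | PROT_WRITE | PROT_EXEC));
}
#endif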
1944
2d21ac55 1945
0b4e3aa0 1946/*
2d21ac55
A
1947 * ubc_destroy_named
1948 *
1949 * Destroy the named memory object associated with the ubc_info control object
1950 * associated with the designated vnode, if there is a ubc_info associated
1951 * with the vnode, and a control object is associated with it
1952 *
1953 * Parameters: vp The designated vnode
1954 *
1955 * Returns: (void)
1956 *
1957 * Notes: This function is called on vnode termination for all vnodes,
1958 * and must therefore not assume that there is a ubc_info that is
1959 * associated with the vnode, nor that there is a control object
1960 * associated with the ubc_info.
1961 *
1962 * If all the conditions necessary are present, this function
1963 * calls memory_object_destroy(), which will in turn end up
1964 * calling ubc_unmap() to release any vnode references that were
1965 * established via ubc_map().
1966 *
1967 * IMPORTANT: This is an internal use function that is used
1968 * exclusively by the internal use function vclean().
0b4e3aa0 1969 */
2d21ac55
A
1970__private_extern__ void
1971ubc_destroy_named(vnode_t vp)
0b4e3aa0
A
1972{
1973 memory_object_control_t control;
0b4e3aa0
A
1974 struct ubc_info *uip;
1975 kern_return_t kret;
1976
2d21ac55 1977 if (UBCINFOEXISTS(vp)) {
0a7de745 1978 uip = vp->v_ubcinfo;
2d21ac55
A
1979
1980 /* Terminate the memory object */
1981 control = ubc_getobject(vp, UBC_HOLDOBJECT);
1982 if (control != MEMORY_OBJECT_CONTROL_NULL) {
0a7de745
A
1983 kret = memory_object_destroy(control, 0);
1984 if (kret != KERN_SUCCESS) {
1985 panic("ubc_destroy_named: memory_object_destroy failed");
1986 }
0b4e3aa0
A
1987 }
1988 }
1c79356b
A
1989}
1990
0b4e3aa0 1991
1c79356b 1992/*
2d21ac55
A
1993 * ubc_isinuse
1994 *
1995 * Determine whether or not a vnode is currently in use by ubc at a level in
1996 * excess of the requested busycount
1997 *
1998 * Parameters: vp The vnode to check
1999 * busycount The threshold busy count, used to bias
2000 * the count usually already held by the
2001 * caller to avoid races
2002 *
2003 * Returns: 1 The vnode is in use over the threshold
2004 * 0 The vnode is not in use over the
2005 * threshold
2006 *
2007 * Notes: Because the vnode is only held locked while actually asking
2008 * the use count, this function only represents a snapshot of the
2009 * current state of the vnode. If more accurate information is
2010 * required, an additional busycount should be held by the caller
2011 * and a non-zero busycount used.
2012 *
2013 * If there is no ubc_info associated with the vnode, this
2014 * function will report that the vnode is not in use by ubc.
1c79356b
A
2015 */
2016int
91447636 2017ubc_isinuse(struct vnode *vp, int busycount)
1c79356b 2018{
0a7de745
A
2019 if (!UBCINFOEXISTS(vp)) {
2020 return 0;
2021 }
2022 return ubc_isinuse_locked(vp, busycount, 0);
1c79356b
A
2023}
2024
91447636 2025
2d21ac55
A
2026/*
2027 * ubc_isinuse_locked
2028 *
2029 * Determine whether or not a vnode is currently in use by ubc at a level in
2030 * excess of the requested busycount
2031 *
2032 * Parameters: vp The vnode to check
2033 * busycount The threshold busy count, used to bias
2034 * the count usually already held by the
2035 * caller to avoid races
2036 * locked True if the vnode is already locked by
2037 * the caller
2038 *
2039 * Returns: 1 The vnode is in use over the threshold
2040 * 0 The vnode is not in use over the
2041 * threshold
2042 *
2043 * Notes: If the vnode is not locked on entry, it is locked while
2044 * actually asking the use count. If this is the case, this
2045 * function only represents a snapshot of the current state of
2046 * the vnode. If more accurate information is required, the
2047 * vnode lock should be held by the caller, otherwise an
2048 * additional busycount should be held by the caller and a
2049 * non-zero busycount used.
2050 *
2051 * If there is no ubc_info associated with the vnode, this
2052 * function will report that the vnode is not in use by ubc.
2053 */
1c79356b 2054int
91447636 2055ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
1c79356b 2056{
91447636 2057 int retval = 0;
1c79356b 2058
9bccf70c 2059
0a7de745 2060 if (!locked) {
b0d623f7 2061 vnode_lock_spin(vp);
0a7de745 2062 }
1c79356b 2063
0a7de745 2064 if ((vp->v_usecount - vp->v_kusecount) > busycount) {
91447636 2065 retval = 1;
0a7de745 2066 }
91447636 2067
0a7de745 2068 if (!locked) {
91447636 2069 vnode_unlock(vp);
0a7de745
A
2070 }
2071 return retval;
1c79356b
A
2072}
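/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * close path asking whether anyone other than the caller still uses
 * the vnode through the UBC.  The caller holds one use count itself,
 * so it passes a busycount of 1 to bias the comparison, as suggested
 * in the notes above.
 */
#if 0
static int
example_vnode_busy_elsewhere(vnode_t vp)
{
	/* Non-zero if users beyond this caller's own reference remain. */
	return ubc_isinuse(vp, 1);
}
#endif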
2073
91447636 2074
1c79356b 2075/*
2d21ac55
A
2076 * ubc_unmap
2077 *
2078 * Reverse the effects of a ubc_map() call for a given vnode
2079 *
2080 * Parameters: vp vnode to unmap from ubc
2081 *
2082 * Returns: (void)
2083 *
2084 * Notes: This is an internal use function used by vnode_pager_unmap().
2085 * It will attempt to obtain a reference on the supplied vnode,
2086 * and if it can do so, and there is an associated ubc_info, and
2087 * the flags indicate that it was mapped via ubc_map(), then the
2088 * flag is cleared, the mapping removed, and the reference taken
2089 * by ubc_map() is released.
2090 *
2091 * IMPORTANT: This MUST only be called by the VM
2092 * to prevent race conditions.
1c79356b 2093 */
0b4e3aa0 2094__private_extern__ void
1c79356b
A
2095ubc_unmap(struct vnode *vp)
2096{
2097 struct ubc_info *uip;
0a7de745
A
2098 int need_rele = 0;
2099 int need_wakeup = 0;
b0d623f7 2100
0a7de745
A
2101 if (vnode_getwithref(vp)) {
2102 return;
2103 }
1c79356b 2104
91447636 2105 if (UBCINFOEXISTS(vp)) {
fe8ab488
A
2106 bool want_fsevent = false;
2107
91447636 2108 vnode_lock(vp);
91447636 2109 uip = vp->v_ubcinfo;
2d21ac55
A
2110
2111 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
2112 SET(uip->ui_flags, UI_MAPWAITING);
2113 (void) msleep(&uip->ui_flags, &vp->v_lock,
0a7de745 2114 PRIBIO, "ubc_unmap", NULL);
2d21ac55
A
2115 }
2116 SET(uip->ui_flags, UI_MAPBUSY);
2117
91447636 2118 if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
0a7de745 2119 if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
fe8ab488 2120 want_fsevent = true;
0a7de745 2121 }
fe8ab488 2122
91447636 2123 need_rele = 1;
fe8ab488
A
2124
2125 /*
2126 * We want to clear the mapped flags after we've called
2127 * VNOP_MNOMAP to avoid certain races and allow
2128 * VNOP_MNOMAP to call ubc_is_mapped_writable.
2129 */
91447636
A
2130 }
2131 vnode_unlock(vp);
fe8ab488 2132
91447636 2133 if (need_rele) {
0a7de745 2134 vfs_context_t ctx = vfs_context_current();
fe8ab488 2135
0a7de745 2136 (void)VNOP_MNOMAP(vp, ctx);
fe8ab488
A
2137
2138#if CONFIG_FSE
0a7de745
A
2139 /*
2140 * Why do we want an fsevent here? Normally the
2141 * content modified fsevent is posted when a file is
2142 * closed and only if it's written to via conventional
2143 * means. It's perfectly legal to close a file and
2144 * keep your mappings and we don't currently track
2145 * whether it was written to via a mapping.
2146 * Therefore, we need to post an fsevent here if the
2147 * file was mapped writable. This may result in false
2148 * events, i.e. we post a notification when nothing
2149 * has really changed.
2150 */
2151 if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
2152 add_fsevent(FSE_CONTENT_MODIFIED, ctx,
2153 FSE_ARG_VNODE, vp,
2154 FSE_ARG_DONE);
2155 }
fe8ab488
A
2156#endif
2157
0a7de745 2158 vnode_rele(vp);
91447636 2159 }
2d21ac55
A
2160
2161 vnode_lock_spin(vp);
2162
0a7de745 2163 if (need_rele) {
fe8ab488 2164 CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
0a7de745 2165 }
fe8ab488 2166
2d21ac55 2167 CLR(uip->ui_flags, UI_MAPBUSY);
fe8ab488 2168
2d21ac55
A
2169 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2170 CLR(uip->ui_flags, UI_MAPWAITING);
2171 need_wakeup = 1;
2172 }
2173 vnode_unlock(vp);
2174
0a7de745
A
2175 if (need_wakeup) {
2176 wakeup(&uip->ui_flags);
2177 }
91447636
A
2178 }
2179 /*
2180 * the drop of the vnode ref will cleanup
2181 */
2182 vnode_put(vp);
0b4e3aa0
A
2183}
2184
2d21ac55
A
2185
2186/*
2187 * ubc_page_op
2188 *
2189 * Manipulate individual page state for a vnode with an associated ubc_info
2190 * with an associated memory object control.
2191 *
2192 * Parameters: vp The vnode backing the page
2193 * f_offset A file offset interior to the page
2194 * ops The operations to perform, as a bitmap
2195 * (see below for more information)
2196 * phys_entryp The address of a ppnum_t; may be NULL
2197 * to ignore
2198 * flagsp A pointer to an int to contain flags;
2199 * may be NULL to ignore
2200 *
2201 * Returns: KERN_SUCCESS Success
2202 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2203 * object associated
2204 * KERN_INVALID_OBJECT If UPL_POP_PHYSICAL and the object is
2205 * not physically contiguous
2206 * KERN_INVALID_OBJECT If !UPL_POP_PHYSICAL and the object is
2207 * physically contiguous
2208 * KERN_FAILURE If the page cannot be looked up
2209 *
2210 * Implicit Returns:
2211 * *phys_entryp (modified) If phys_entryp is non-NULL and
2212 * UPL_POP_PHYSICAL
2213 * *flagsp (modified) If flagsp is non-NULL and there was
2214 * !UPL_POP_PHYSICAL and a KERN_SUCCESS
2215 *
2216 * Notes: For object boundaries, it is considerably more efficient to
2217 * ensure that f_offset is in fact on a page boundary, as this
2218 * will avoid internal use of the hash table to identify the
2219 * page, and would therefore skip a number of early optimizations.
2220 * Since this is a page operation anyway, the caller should try
2221 * to pass only a page aligned offset because of this.
2222 *
2223 * *flagsp may be modified even if this function fails. If it is
2224 * modified, it will contain the condition of the page before the
2225 * requested operation was attempted; these will only include the
2226 * bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
2227 * UPL_POP_SET, or UPL_POP_CLR bits.
2228 *
2229 * The flags field may contain a specific operation, such as
2230 * UPL_POP_PHYSICAL or UPL_POP_DUMP:
2231 *
2232 * o UPL_POP_PHYSICAL Fail if not contiguous; if
2233 * *phys_entryp and successful, set
2234 * *phys_entryp
2235 * o UPL_POP_DUMP Dump the specified page
2236 *
2237 * Otherwise, it is treated as a bitmap of one or more page
2238 * operations to perform on the final memory object; allowable
2239 * bit values are:
2240 *
2241 * o UPL_POP_DIRTY The page is dirty
2242 * o UPL_POP_PAGEOUT The page is paged out
2243 * o UPL_POP_PRECIOUS The page is precious
2244 * o UPL_POP_ABSENT The page is absent
2245 * o UPL_POP_BUSY The page is busy
2246 *
2247 * If the page status is only being queried and not modified, then
2248 * no other bits should be specified. However, if it is being
2249 * modified, exactly ONE of the following bits should be set:
2250 *
2251 * o UPL_POP_SET Set the current bitmap bits
2252 * o UPL_POP_CLR Clear the current bitmap bits
2253 *
2254 * Thus to effect a combination of setting and clearing, it may be
2255 * necessary to call this function twice. If this is done, the
2256 * set should be used before the clear, since clearing may trigger
2257 * a wakeup on the destination page, and if the page is backed by
2258 * an encrypted swap file, setting will trigger the decryption
2259 * needed before the wakeup occurs.
2260 */
0b4e3aa0
A
2261kern_return_t
2262ubc_page_op(
0a7de745
A
2263 struct vnode *vp,
2264 off_t f_offset,
2265 int ops,
2266 ppnum_t *phys_entryp,
2267 int *flagsp)
0b4e3aa0 2268{
0a7de745 2269 memory_object_control_t control;
0b4e3aa0
A
2270
2271 control = ubc_getobject(vp, UBC_FLAGS_NONE);
0a7de745 2272 if (control == MEMORY_OBJECT_CONTROL_NULL) {
0b4e3aa0 2273 return KERN_INVALID_ARGUMENT;
0a7de745 2274 }
0b4e3aa0 2275
0a7de745
A
2276 return memory_object_page_op(control,
2277 (memory_object_offset_t)f_offset,
2278 ops,
2279 phys_entryp,
2280 flagsp);
0b4e3aa0 2281}
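/*
 * Illustrative sketch, not part of the original file: querying the
 * state of a single page with ubc_page_op().  The offset is truncated
 * to a page boundary, as recommended above, and no UPL_POP_SET or
 * UPL_POP_CLR bit is passed, so the page is only inspected.  The
 * helper name is invented.
 */
#if 0
static boolean_t
example_page_is_dirty(struct vnode *vp, off_t f_offset)
{
	int flags = 0;

	if (ubc_page_op(vp, trunc_page_64(f_offset), 0, NULL, &flags) != KERN_SUCCESS) {
		return FALSE;           /* page could not be looked up */
	}
	return (flags & UPL_POP_DIRTY) ? TRUE : FALSE;
}
#endif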
2d21ac55
A
2282
2283
2284/*
2285 * ubc_range_op
2286 *
2287 * Manipulate page state for a range of memory for a vnode with an associated
2288 * ubc_info with an associated memory object control, when page level state is
2289 * not required to be returned from the call (i.e. there are no phys_entryp or
2290 * flagsp parameters to this call, and it takes a range which may contain
2291 * multiple pages, rather than an offset interior to a single page).
2292 *
2293 * Parameters: vp The vnode backing the page
2294 * f_offset_beg A file offset interior to the start page
2295 * f_offset_end A file offset interior to the end page
2296 * ops The operations to perform, as a bitmap
2297 * (see below for more information)
2298 * range The address of an int; may be NULL to
2299 * ignore
2300 *
2301 * Returns: KERN_SUCCESS Success
2302 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2303 * object associated
2304 * KERN_INVALID_OBJECT If the object is physically contiguous
2305 *
2306 * Implicit Returns:
2307 * *range (modified) If range is non-NULL, its contents will
2308 * be modified to contain the number of
2309 * bytes successfully operated upon.
2310 *
2311 * Notes: IMPORTANT: This function cannot be used on a range that
2312 * consists of physically contiguous pages.
2313 *
2314 * For object boundaries, it is considerably more efficient to
2315 * ensure that f_offset_beg and f_offset_end are in fact on page
2316 * boundaries, as this will avoid internal use of the hash table
2317 * to identify the page, and would therefore skip a number of
2318 * early optimizations. Since this is an operation on a set of
2319 * pages anyway, the caller should try to pass only page aligned
2320 * offsets because of this.
2321 *
2322 * *range will be modified only if this function succeeds.
2323 *
2324 * The flags field MUST contain a specific operation; allowable
2325 * values are:
2326 *
2327 * o UPL_ROP_ABSENT Returns the extent of the range
2328 * presented which is absent, starting
2329 * with the start address presented
2330 *
2331 * o UPL_ROP_PRESENT Returns the extent of the range
2332 * presented which is present (resident),
2333 * starting with the start address
2334 * presented
2335 * o UPL_ROP_DUMP Dump the pages which are found in the
2336 * target object for the target range.
2337 *
2338 * IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2339 * multiple regions in the range, only the first matching region
2340 * is returned.
2341 */
55e303ae
A
2342kern_return_t
2343ubc_range_op(
0a7de745
A
2344 struct vnode *vp,
2345 off_t f_offset_beg,
2346 off_t f_offset_end,
55e303ae
A
2347 int ops,
2348 int *range)
2349{
0a7de745 2350 memory_object_control_t control;
55e303ae
A
2351
2352 control = ubc_getobject(vp, UBC_FLAGS_NONE);
0a7de745 2353 if (control == MEMORY_OBJECT_CONTROL_NULL) {
55e303ae 2354 return KERN_INVALID_ARGUMENT;
0a7de745 2355 }
55e303ae 2356
0a7de745
A
2357 return memory_object_range_op(control,
2358 (memory_object_offset_t)f_offset_beg,
2359 (memory_object_offset_t)f_offset_end,
2360 ops,
2361 range);
55e303ae 2362}
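/*
 * Illustrative sketch, not part of the original file: measuring with
 * UPL_ROP_PRESENT how much of a page-aligned range is resident,
 * starting from the given offset.  The helper is hypothetical and, per
 * the IMPORTANT note above, must not be used on a physically
 * contiguous object.
 */
#if 0
static off_t
example_resident_extent(struct vnode *vp, off_t start, off_t len)
{
	int range = 0;

	start = trunc_page_64(start);
	len = round_page_64(len);

	if (ubc_range_op(vp, start, start + len, UPL_ROP_PRESENT, &range) != KERN_SUCCESS) {
		return 0;
	}
	/* 'range' now holds the byte length of the leading resident run. */
	return (off_t)range;
}
#endif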
2d21ac55
A
2363
2364
2365/*
2366 * ubc_create_upl
2367 *
2368 * Given a vnode, cause the population of a portion of the vm_object; based on
2369 * the nature of the request, the pages returned may contain valid data, or
2370 * they may be uninitialized.
2371 *
2372 * Parameters: vp The vnode from which to create the upl
2373 * f_offset The start offset into the backing store
2374 * represented by the vnode
2375 * bufsize The size of the upl to create
2376 * uplp Pointer to the upl_t to receive the
2377 * created upl; MUST NOT be NULL
2378 * plp Pointer to receive the internal page
2379 * list for the created upl; MAY be NULL
2380 * to ignore
2381 *
2382 * Returns: KERN_SUCCESS The requested upl has been created
2383 * KERN_INVALID_ARGUMENT The bufsize argument is not an even
2384 * multiple of the page size
2385 * KERN_INVALID_ARGUMENT There is no ubc_info associated with
2386 * the vnode, or there is no memory object
0a7de745 2387 * control associated with the ubc_info
2d21ac55
A
2388 * memory_object_upl_request:KERN_INVALID_VALUE
2389 * The supplied upl_flags argument is
2390 * invalid
2391 * Implicit Returns:
0a7de745 2392 * *uplp (modified)
2d21ac55
A
2393 * *plp (modified) If non-NULL, the value of *plp will be
2394 * modified to point to the internal page
2395 * list; this modification may occur even
2396 * if this function is unsuccessful, in
2397 * which case the contents may be invalid
2398 *
2399 * Note: If successful, the returned *uplp MUST subsequently be freed
2400 * via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2401 * ubc_upl_abort(), or ubc_upl_abort_range().
2402 */
0b4e3aa0 2403kern_return_t
5ba3f43e 2404ubc_create_upl_external(
0a7de745
A
2405 struct vnode *vp,
2406 off_t f_offset,
2407 int bufsize,
2408 upl_t *uplp,
2409 upl_page_info_t **plp,
2410 int uplflags)
5ba3f43e 2411{
0a7de745 2412 return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
5ba3f43e
A
2413}
2414
2415kern_return_t
2416ubc_create_upl_kernel(
0a7de745
A
2417 struct vnode *vp,
2418 off_t f_offset,
2419 int bufsize,
2420 upl_t *uplp,
2421 upl_page_info_t **plp,
2422 int uplflags,
5ba3f43e 2423 vm_tag_t tag)
0b4e3aa0 2424{
0a7de745
A
2425 memory_object_control_t control;
2426 kern_return_t kr;
b0d623f7 2427
0a7de745 2428 if (plp != NULL) {
b0d623f7 2429 *plp = NULL;
0a7de745 2430 }
b0d623f7 2431 *uplp = NULL;
0a7de745
A
2432
2433 if (bufsize & 0xfff) {
0b4e3aa0 2434 return KERN_INVALID_ARGUMENT;
0a7de745 2435 }
0b4e3aa0 2436
0a7de745 2437 if (bufsize > MAX_UPL_SIZE_BYTES) {
6d2010ae 2438 return KERN_INVALID_ARGUMENT;
0a7de745 2439 }
6d2010ae 2440
b0d623f7 2441 if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
b0d623f7
A
2442 if (uplflags & UPL_UBC_MSYNC) {
2443 uplflags &= UPL_RET_ONLY_DIRTY;
2444
2445 uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
0a7de745 2446 UPL_SET_INTERNAL | UPL_SET_LITE;
b0d623f7
A
2447 } else if (uplflags & UPL_UBC_PAGEOUT) {
2448 uplflags &= UPL_RET_ONLY_DIRTY;
2449
0a7de745 2450 if (uplflags & UPL_RET_ONLY_DIRTY) {
b0d623f7 2451 uplflags |= UPL_NOBLOCK;
0a7de745 2452 }
b0d623f7
A
2453
2454 uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
0a7de745 2455 UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
b0d623f7 2456 } else {
316670eb 2457 uplflags |= UPL_RET_ONLY_ABSENT |
0a7de745
A
2458 UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
2459 UPL_SET_INTERNAL | UPL_SET_LITE;
316670eb
A
2460
2461 /*
2462 * if the requested size == PAGE_SIZE, we don't want to set
2463 * the UPL_NOBLOCK since we may be trying to recover from a
2464 * previous partial pagein I/O that occurred because we were low
2465 * on memory and bailed early in order to honor the UPL_NOBLOCK...
2466 * since we're only asking for a single page, we can block w/o fear
2467 * of tying up pages while waiting for more to become available
2468 */
0a7de745 2469 if (bufsize > PAGE_SIZE) {
316670eb 2470 uplflags |= UPL_NOBLOCK;
0a7de745 2471 }
b0d623f7
A
2472 }
2473 } else {
55e303ae 2474 uplflags &= ~UPL_FOR_PAGEOUT;
55e303ae 2475
b0d623f7
A
2476 if (uplflags & UPL_WILL_BE_DUMPED) {
2477 uplflags &= ~UPL_WILL_BE_DUMPED;
0a7de745
A
2478 uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
2479 } else {
2480 uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
2481 }
b0d623f7
A
2482 }
2483 control = ubc_getobject(vp, UBC_FLAGS_NONE);
0a7de745 2484 if (control == MEMORY_OBJECT_CONTROL_NULL) {
0b4e3aa0 2485 return KERN_INVALID_ARGUMENT;
0a7de745 2486 }
0b4e3aa0 2487
5ba3f43e 2488 kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
0a7de745 2489 if (kr == KERN_SUCCESS && plp != NULL) {
b0d623f7 2490 *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
0a7de745 2491 }
0b4e3aa0
A
2492 return kr;
2493}
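/*
 * Illustrative sketch, not part of the original file: the usual
 * create/map portion of a UPL life cycle.  The helper is hypothetical
 * and assumes VM_KERN_MEMORY_FILE is an acceptable allocation tag for
 * the kernel variant.  The matching unmap/commit step is sketched
 * after ubc_upl_commit_range() below.
 */
#if 0
static kern_return_t
example_map_one_page(struct vnode *vp, off_t f_offset, upl_t *uplp, vm_offset_t *kaddrp)
{
	upl_page_info_t *pl = NULL;
	kern_return_t kr;

	/* Page-in request for one page-aligned, page-sized range. */
	kr = ubc_create_upl_kernel(vp, trunc_page_64(f_offset), PAGE_SIZE,
	    uplp, &pl, UPL_UBC_PAGEIN, VM_KERN_MEMORY_FILE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* Map the page list into the kernel so the data can be inspected. */
	kr = ubc_upl_map(*uplp, kaddrp);
	if (kr != KERN_SUCCESS) {
		/* ubc_upl_abort() also deallocates the upl unconditionally. */
		(void) ubc_upl_abort(*uplp, UPL_ABORT_ERROR);
		*uplp = NULL;
	}
	return kr;
}
#endif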
0a7de745
A
2494
2495
2d21ac55
A
2496/*
2497 * ubc_upl_maxbufsize
2498 *
2499 * Return the maximum bufsize ubc_create_upl( ) will take.
2500 *
2501 * Parameters: none
2502 *
2503 * Returns: maximum size buffer (in bytes) ubc_create_upl( ) will take.
2504 */
0a7de745 2505upl_size_t
2d21ac55
A
2506ubc_upl_maxbufsize(
2507 void)
2508{
0a7de745 2509 return MAX_UPL_SIZE_BYTES;
2d21ac55 2510}
0b4e3aa0 2511
2d21ac55
A
2512/*
2513 * ubc_upl_map
2514 *
2515 * Map the page list associated with the supplied upl into the kernel virtual
2516 * address space at the virtual address indicated by the dst_addr argument;
2517 * the entire upl is mapped
2518 *
2519 * Parameters: upl The upl to map
2520 * dst_addr The address at which to map the upl
2521 *
2522 * Returns: KERN_SUCCESS The upl has been mapped
2523 * KERN_INVALID_ARGUMENT The upl is UPL_NULL
2524 * KERN_FAILURE The upl is already mapped
2525 * vm_map_enter:KERN_INVALID_ARGUMENT
2526 * A failure code from vm_map_enter() due
2527 * to an invalid argument
2528 */
0b4e3aa0
A
2529kern_return_t
2530ubc_upl_map(
0a7de745
A
2531 upl_t upl,
2532 vm_offset_t *dst_addr)
0b4e3aa0 2533{
0a7de745 2534 return vm_upl_map(kernel_map, upl, dst_addr);
0b4e3aa0
A
2535}
2536
2537
2d21ac55
A
2538/*
2539 * ubc_upl_unmap
2540 *
2541 * Unmap the page list associated with the supplied upl from the kernel virtual
2542 * address space; the entire upl is unmapped.
2543 *
2544 * Parameters: upl The upl to unmap
2545 *
2546 * Returns: KERN_SUCCESS The upl has been unmapped
2547 * KERN_FAILURE The upl is not currently mapped
2548 * KERN_INVALID_ARGUMENT If the upl is UPL_NULL
2549 */
0b4e3aa0
A
2550kern_return_t
2551ubc_upl_unmap(
0a7de745 2552 upl_t upl)
0b4e3aa0 2553{
0a7de745 2554 return vm_upl_unmap(kernel_map, upl);
0b4e3aa0
A
2555}
2556
2d21ac55
A
2557
2558/*
2559 * ubc_upl_commit
2560 *
2561 * Commit the contents of the upl to the backing store
2562 *
2563 * Parameters: upl The upl to commit
2564 *
2565 * Returns: KERN_SUCCESS The upl has been committed
2566 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2567 * KERN_FAILURE The supplied upl does not represent
2568 * device memory, and the offset plus the
2569 * size would exceed the actual size of
2570 * the upl
2571 *
2572 * Notes: In practice, the only return value for this function should be
2573 * KERN_SUCCESS, unless there has been data structure corruption;
2574 * since the upl is deallocated regardless of success or failure,
2575 * there's really nothing to do about this other than panic.
2576 *
2577 * IMPORTANT: Use of this function should not be mixed with use of
2578 * ubc_upl_commit_range(), due to the unconditional deallocation
2579 * by this function.
2580 */
0b4e3aa0
A
2581kern_return_t
2582ubc_upl_commit(
0a7de745 2583 upl_t upl)
0b4e3aa0 2584{
0a7de745
A
2585 upl_page_info_t *pl;
2586 kern_return_t kr;
0b4e3aa0
A
2587
2588 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
fe8ab488 2589 kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
0b4e3aa0
A
2590 upl_deallocate(upl);
2591 return kr;
1c79356b
A
2592}
2593
0b4e3aa0 2594
2d21ac55
A
2595/*
2596 * ubc_upl_commit_range
2597 *
2598 * Commit the contents of the specified range of the upl to the backing store
2599 *
2600 * Parameters: upl The upl to commit
2601 * offset The offset into the upl
2602 * size The size of the region to be committed,
2603 * starting at the specified offset
2604 * flags commit type (see below)
2605 *
2606 * Returns: KERN_SUCCESS The range has been committed
2607 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2608 * KERN_FAILURE The supplied upl does not represent
2609 * device memory, and the offset plus the
2610 * size would exceed the actual size of
2611 * the upl
2612 *
2613 * Notes: IMPORTANT: If the commit is successful, and the object is now
2614 * empty, the upl will be deallocated. Since the caller cannot
2615 * check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2616 * should generally only be used when the offset is 0 and the size
2617 * is equal to the upl size.
2618 *
2619 * The flags argument is a bitmap of flags on the range of pages in
2620 * the upl to be committed; allowable flags are:
2621 *
2622 * o UPL_COMMIT_FREE_ON_EMPTY Free the upl when it is
2623 * both empty and has been
2624 * successfully committed
2625 * o UPL_COMMIT_CLEAR_DIRTY Clear each page's dirty
2626 * bit; will prevent a
2627 * later pageout
2628 * o UPL_COMMIT_SET_DIRTY Set each page's dirty
2629 * bit; will cause a later
2630 * pageout
2631 * o UPL_COMMIT_INACTIVATE Clear each page's
2632 * reference bit; the page
2633 * will not be accessed
2634 * o UPL_COMMIT_ALLOW_ACCESS Unbusy each page; pages
2635 * become busy when an
2636 * IOMemoryDescriptor is
2637 * mapped or redirected,
2638 * and we have to wait for
2639 * an IOKit driver
2640 *
2641 * The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2642 * not be specified by the caller.
2643 *
2644 * The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2645 * mutually exclusive, and should not be combined.
2646 */
0b4e3aa0
A
2647kern_return_t
2648ubc_upl_commit_range(
0a7de745
A
2649 upl_t upl,
2650 upl_offset_t offset,
2651 upl_size_t size,
2652 int flags)
0b4e3aa0 2653{
0a7de745
A
2654 upl_page_info_t *pl;
2655 boolean_t empty;
2656 kern_return_t kr;
0b4e3aa0 2657
0a7de745 2658 if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
0b4e3aa0 2659 flags |= UPL_COMMIT_NOTIFY_EMPTY;
0a7de745 2660 }
0b4e3aa0 2661
593a1d5f
A
2662 if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2663 return KERN_INVALID_ARGUMENT;
2664 }
2665
0b4e3aa0
A
2666 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2667
2668 kr = upl_commit_range(upl, offset, size, flags,
0a7de745 2669 pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
0b4e3aa0 2670
0a7de745 2671 if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
0b4e3aa0 2672 upl_deallocate(upl);
0a7de745 2673 }
0b4e3aa0
A
2674
2675 return kr;
2676}
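/*
 * Illustrative sketch, not part of the original file: releasing the UPL
 * obtained in the sketch after ubc_create_upl_kernel() above.  The whole
 * UPL is committed as a single range, so UPL_COMMIT_FREE_ON_EMPTY is
 * used in the way the note above recommends (offset 0, full size).
 */
#if 0
static void
example_finish_one_page(upl_t upl, boolean_t dirtied)
{
	int flags = UPL_COMMIT_FREE_ON_EMPTY;

	/* Drop the kernel mapping established by ubc_upl_map(). */
	(void) ubc_upl_unmap(upl);

	if (dirtied) {
		/* Mark the page dirty so it is eventually paged out. */
		flags |= UPL_COMMIT_SET_DIRTY;
	}
	(void) ubc_upl_commit_range(upl, 0, PAGE_SIZE, flags);
}
#endif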
2d21ac55
A
2677
2678
2679/*
2680 * ubc_upl_abort_range
2681 *
2682 * Abort the contents of the specified range of the specified upl
2683 *
2684 * Parameters: upl The upl to abort
2685 * offset The offset into the upl
2686 * size The size of the region to be aborted,
2687 * starting at the specified offset
2688 * abort_flags abort type (see below)
2689 *
2690 * Returns: KERN_SUCCESS The range has been aborted
2691 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2692 * KERN_FAILURE The supplied upl does not represent
2693 * device memory, and the offset plus the
2694 * size would exceed the actual size of
2695 * the upl
2696 *
2697 * Notes: IMPORTANT: If the abort is successful, and the object is now
2698 * empty, the upl will be deallocated. Since the caller cannot
2699 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2700 * should generally only be used when the offset is 0 and the size
2701 * is equal to the upl size.
2702 *
2703 * The abort_flags argument is a bitmap of flags on the range of
2704 * pages in the upl to be aborted; allowable flags are:
2705 *
2706 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2707 * empty and has been successfully
2708 * aborted
2709 * o UPL_ABORT_RESTART The operation must be restarted
2710 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2711 * o UPL_ABORT_ERROR An I/O error occurred
2712 * o UPL_ABORT_DUMP_PAGES Just free the pages
2713 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2714 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2715 *
2716 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2717 * not be specified by the caller. It is intended to fulfill the
2718 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2719 * ubc_upl_commit_range(), but is never referenced internally.
2720 *
2721 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2722 * referenced; do not use it.
2723 */
0b4e3aa0
A
2724kern_return_t
2725ubc_upl_abort_range(
0a7de745
A
2726 upl_t upl,
2727 upl_offset_t offset,
2728 upl_size_t size,
2729 int abort_flags)
0b4e3aa0 2730{
0a7de745
A
2731 kern_return_t kr;
2732 boolean_t empty = FALSE;
0b4e3aa0 2733
0a7de745 2734 if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
0b4e3aa0 2735 abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
0a7de745 2736 }
0b4e3aa0
A
2737
2738 kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2739
0a7de745 2740 if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
0b4e3aa0 2741 upl_deallocate(upl);
0a7de745 2742 }
0b4e3aa0
A
2743
2744 return kr;
2745}
2746
2d21ac55
A
2747
2748/*
2749 * ubc_upl_abort
2750 *
2751 * Abort the contents of the specified upl
2752 *
2753 * Parameters: upl The upl to abort
2754 * abort_type abort type (see below)
2755 *
2756 * Returns: KERN_SUCCESS The range has been aborted
2757 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2758 * KERN_FAILURE The supplied upl does not represent
2759 * device memory, and the offset plus the
2760 * size would exceed the actual size of
2761 * the upl
2762 *
2763 * Notes: IMPORTANT: If the abort is successful, and the object is now
2764 * empty, the upl will be deallocated. Since the caller cannot
2765 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2766 * should generally only be used when the offset is 0 and the size
2767 * is equal to the upl size.
2768 *
2769 * The abort_type is a bitmap of flags on the range of
2770 * pages in the upl to be aborted; allowable flags are:
2771 *
2772 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2773 * empty and has been successfully
2774 * aborted
2775 * o UPL_ABORT_RESTART The operation must be restarted
2776 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2777 * o UPL_ABORT_ERROR An I/O error occurred
2778 * o UPL_ABORT_DUMP_PAGES Just free the pages
2779 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2780 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2781 *
2782 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2783 * not be specified by the caller. It is intended to fulfill the
2784 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2785 * ubc_upl_commit_range(), but is never referenced internally.
2786 *
2787 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2788 * referenced; do not use it.
2789 */
0b4e3aa0
A
2790kern_return_t
2791ubc_upl_abort(
0a7de745
A
2792 upl_t upl,
2793 int abort_type)
0b4e3aa0 2794{
0a7de745 2795 kern_return_t kr;
0b4e3aa0
A
2796
2797 kr = upl_abort(upl, abort_type);
2798 upl_deallocate(upl);
2799 return kr;
2800}
2801
2d21ac55
A
2802
2803/*
2804 * ubc_upl_pageinfo
2805 *
2806 * Retrieve the internal page list for the specified upl
2807 *
2808 * Parameters: upl The upl to obtain the page list from
2809 *
2810 * Returns: !NULL The (upl_page_info_t *) for the page
2811 * list internal to the upl
2812 * NULL Error/no page list associated
2813 *
2814 * Notes: IMPORTANT: The function is only valid on internal objects
2815 * where the list request was made with the UPL_INTERNAL flag.
2816 *
2817 * This function is a utility helper function, since some callers
2818 * may not have direct access to the header defining the macro,
2819 * due to abstraction layering constraints.
2820 */
0b4e3aa0
A
2821upl_page_info_t *
2822ubc_upl_pageinfo(
0a7de745
A
2823 upl_t upl)
2824{
2825 return UPL_GET_INTERNAL_PAGE_LIST(upl);
0b4e3aa0 2826}
91447636 2827
91447636 2828
0a7de745 2829int
fe8ab488 2830UBCINFOEXISTS(const struct vnode * vp)
91447636 2831{
0a7de745 2832 return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
91447636
A
2833}
2834
2d21ac55 2835
316670eb
A
2836void
2837ubc_upl_range_needed(
0a7de745
A
2838 upl_t upl,
2839 int index,
2840 int count)
316670eb
A
2841{
2842 upl_range_needed(upl, index, count);
2843}
2844
0a7de745
A
2845boolean_t
2846ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
fe8ab488 2847{
0a7de745 2848 if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
fe8ab488 2849 return FALSE;
0a7de745
A
2850 }
2851 if (writable) {
fe8ab488 2852 *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
0a7de745 2853 }
fe8ab488
A
2854 return TRUE;
2855}
2856
0a7de745
A
2857boolean_t
2858ubc_is_mapped_writable(const struct vnode *vp)
fe8ab488
A
2859{
2860 boolean_t writable;
2861 return ubc_is_mapped(vp, &writable) && writable;
2862}
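/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * diagnostic that reports whether a vnode is currently mapped, and
 * whether any of those mappings were created writable.  As noted in
 * ubc_unmap() above, UI_ISMAPPED/UI_MAPPEDWRITE stay set until after
 * VNOP_MNOMAP returns, so this is also usable from that callback.
 */
#if 0
static void
example_log_mapping_state(vnode_t vp)
{
	boolean_t writable = FALSE;

	if (ubc_is_mapped(vp, &writable)) {
		printf("vnode %p is mapped%s\n", vp, writable ? " (writable)" : "");
	}
}
#endif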
2863
316670eb 2864
2d21ac55
A
2865/*
2866 * CODE SIGNING
2867 */
2d21ac55
A
2868static volatile SInt32 cs_blob_size = 0;
2869static volatile SInt32 cs_blob_count = 0;
2870static SInt32 cs_blob_size_peak = 0;
2871static UInt32 cs_blob_size_max = 0;
2872static SInt32 cs_blob_count_peak = 0;
2d21ac55 2873
6d2010ae
A
2874SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
2875SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
2876SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
2877SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
2878SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");
2d21ac55 2879
3e170ce0
A
2880/*
2881 * Function: csblob_parse_teamid
2882 *
2883 * Description: This function returns a pointer to the team id
0a7de745
A
2884 * stored within the codedirectory of the csblob.
2885 * If the codedirectory predates team-ids, it returns
2886 * NULL.
2887 * This does not copy the name but returns a pointer to
2888 * it within the CD. Subsequently, the CD must be
2889 * available when this is used.
2890 */
3e170ce0
A
2891
2892static const char *
2893csblob_parse_teamid(struct cs_blob *csblob)
2894{
2895 const CS_CodeDirectory *cd;
2896
490019cf 2897 cd = csblob->csb_cd;
3e170ce0 2898
0a7de745 2899 if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
3e170ce0 2900 return NULL;
0a7de745 2901 }
3e170ce0 2902
0a7de745 2903 if (cd->teamOffset == 0) {
3e170ce0 2904 return NULL;
0a7de745 2905 }
3e170ce0
A
2906
2907 const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
0a7de745 2908 if (cs_debug > 1) {
3e170ce0 2909 printf("found team-id %s in cdblob\n", name);
0a7de745 2910 }
3e170ce0
A
2911
2912 return name;
2913}
2914
39236c6e 2915
593a1d5f
A
2916kern_return_t
2917ubc_cs_blob_allocate(
0a7de745
A
2918 vm_offset_t *blob_addr_p,
2919 vm_size_t *blob_size_p)
593a1d5f 2920{
0a7de745 2921 kern_return_t kr = KERN_FAILURE;
593a1d5f 2922
d9a64523
A
2923 {
2924 *blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);
2925
2926 if (*blob_addr_p == 0) {
2927 kr = KERN_NO_SPACE;
2928 } else {
2929 kr = KERN_SUCCESS;
2930 }
593a1d5f 2931 }
d9a64523 2932
593a1d5f
A
2933 return kr;
2934}
2935
2936void
2937ubc_cs_blob_deallocate(
0a7de745
A
2938 vm_offset_t blob_addr,
2939 vm_size_t blob_size)
593a1d5f 2940{
d9a64523
A
2941#if PMAP_CS
2942 if (blob_size > pmap_cs_blob_limit) {
2943 kmem_free(kernel_map, blob_addr, blob_size);
2944 } else
2945#endif
2946 {
0a7de745 2947 kfree(blob_addr, blob_size);
d9a64523 2948 }
39037602
A
2949}
2950
2951/*
2952 * Some codesigned files use a lowest common denominator page size of
2953 * 4KiB, but can be used on systems that have a runtime page size of
2954 * 16KiB. Since faults will only occur on 16KiB ranges in
2955 * cs_validate_range(), we can convert the original Code Directory to
2956 * a multi-level scheme where groups of 4 hashes are combined to form
2957 * a new hash, which represents 16KiB in the on-disk file. This can
2958 * reduce the wired memory requirement for the Code Directory by
2959 * 75%. Care must be taken for binaries that use the "fourk" VM pager
2960 * for unaligned access, which may still attempt to validate on
2961 * non-16KiB multiples for compatibility with 3rd party binaries.
2962 */
2963static boolean_t
2964ubc_cs_supports_multilevel_hash(struct cs_blob *blob)
2965{
2966 const CS_CodeDirectory *cd;
2967
0a7de745 2968
39037602
A
2969 /*
2970 * Only applies to binaries that ship as part of the OS,
2971 * primarily the shared cache.
2972 */
2973 if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
2974 return FALSE;
2975 }
2976
2977 /*
2978 * If the runtime page size matches the code signing page
2979 * size, there is no work to do.
2980 */
2981 if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
2982 return FALSE;
2983 }
2984
2985 cd = blob->csb_cd;
2986
2987 /*
2988 * There must be a valid integral multiple of hashes
2989 */
2990 if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
2991 return FALSE;
2992 }
2993
2994 /*
2995 * Scatter lists must also have ranges that have an integral number of hashes
2996 */
2997 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
39037602 2998 const SC_Scatter *scatter = (const SC_Scatter*)
0a7de745 2999 ((const char*)cd + ntohl(cd->scatterOffset));
39037602
A
3000 /* iterate all scatter structs to make sure they are all aligned */
3001 do {
3002 uint32_t sbase = ntohl(scatter->base);
3003 uint32_t scount = ntohl(scatter->count);
3004
3005 /* last scatter? */
3006 if (scount == 0) {
3007 break;
3008 }
3009
3010 if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3011 return FALSE;
3012 }
3013
3014 if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3015 return FALSE;
3016 }
3017
3018 scatter++;
0a7de745 3019 } while (1);
39037602
A
3020 }
3021
3022 /* Covered range must be a multiple of the new page size */
3023 if (ntohl(cd->codeLimit) & PAGE_MASK) {
3024 return FALSE;
3025 }
3026
3027 /* All checks pass */
3028 return TRUE;
3029}
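/*
 * Worked example, not part of the original file: with a 4KiB code
 * signing page size (csb_hash_pageshift == 12) on a 16KiB runtime page
 * size (PAGE_SHIFT == 14), the conversion routine below folds
 * 1 << (14 - 12) = 4 adjacent per-4KiB hashes into one per-16KiB slot,
 * so nCodeSlots shrinks to a quarter of its original value, which is
 * the 75% reduction mentioned in the comment above.
 */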
3030
3031/*
d9a64523
A
3032 * Given a cs_blob with an already chosen best code directory, this
3033 * function allocates memory and copies into it only the blobs that
3034 * will be needed by the kernel, namely the single chosen code
3035 * directory (and not any of its alternatives) and the entitlement
3036 * blob.
3037 *
3038 * This saves significant memory with agile signatures, and additional
3039 * memory for 3rd Party Code because we also omit the CMS blob.
3040 *
3041 * To support multilevel and other potential code directory rewriting,
3042 * the size of a new code directory can be specified. Since that code
3043 * directory will replace the existing code directory,
3044 * ubc_cs_reconstitute_code_signature does not copy the original code
3045 * directory when a size is given, and the caller must fill it in.
39037602 3046 */
d9a64523
A
3047static int
3048ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size,
0a7de745
A
3049 vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p,
3050 CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p)
39037602 3051{
0a7de745
A
3052 const CS_CodeDirectory *old_cd, *cd;
3053 CS_CodeDirectory *new_cd;
39037602
A
3054 const CS_GenericBlob *entitlements;
3055 vm_offset_t new_blob_addr;
3056 vm_size_t new_blob_size;
3057 vm_size_t new_cdsize;
0a7de745
A
3058 kern_return_t kr;
3059 int error;
39037602
A
3060
3061 old_cd = blob->csb_cd;
3062
d9a64523 3063 new_cdsize = optional_new_cd_size != 0 ? optional_new_cd_size : htonl(old_cd->length);
39037602
A
3064
3065 new_blob_size = sizeof(CS_SuperBlob);
3066 new_blob_size += sizeof(CS_BlobIndex);
3067 new_blob_size += new_cdsize;
3068
3069 if (blob->csb_entitlements_blob) {
3070 /* We need to add a slot for the entitlements */
3071 new_blob_size += sizeof(CS_BlobIndex);
3072 new_blob_size += ntohl(blob->csb_entitlements_blob->length);
3073 }
3074
3075 kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
3076 if (kr != KERN_SUCCESS) {
3077 if (cs_debug > 1) {
3078 printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
0a7de745 3079 kr);
39037602 3080 }
d9a64523 3081 return ENOMEM;
39037602
A
3082 }
3083
0a7de745 3084 CS_SuperBlob *new_superblob;
39037602
A
3085
3086 new_superblob = (CS_SuperBlob *)new_blob_addr;
3087 new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
3088 new_superblob->length = htonl((uint32_t)new_blob_size);
3089 if (blob->csb_entitlements_blob) {
0a7de745 3090 vm_size_t ent_offset, cd_offset;
39037602
A
3091
3092 cd_offset = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
3093 ent_offset = cd_offset + new_cdsize;
3094
3095 new_superblob->count = htonl(2);
3096 new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
3097 new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
3098 new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
3099 new_superblob->index[1].offset = htonl((uint32_t)ent_offset);
3100
3101 memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));
3102
3103 new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
3104 } else {
d9a64523
A
3105 // Blob is the code directory, directly.
3106 new_cd = (CS_CodeDirectory *)new_blob_addr;
3107 }
39037602 3108
d9a64523
A
3109 if (optional_new_cd_size == 0) {
3110 // Copy code directory, and revalidate.
3111 memcpy(new_cd, old_cd, new_cdsize);
39037602 3112
d9a64523 3113 vm_size_t length = new_blob_size;
39037602 3114
d9a64523
A
3115 error = cs_validate_csblob((const uint8_t *)new_blob_addr, length, &cd, &entitlements);
3116
3117 if (error) {
3118 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
0a7de745 3119 error);
d9a64523
A
3120
3121 ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3122 return error;
3123 }
3124 *new_entitlements_p = entitlements;
3125 } else {
3126 // Caller will fill out and validate code directory.
3127 memset(new_cd, 0, new_cdsize);
3128 *new_entitlements_p = NULL;
3129 }
3130
3131 *new_blob_addr_p = new_blob_addr;
3132 *new_blob_size_p = new_blob_size;
3133 *new_cd_p = new_cd;
3134
3135 return 0;
3136}
3137
3138static int
3139ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3140{
0a7de745
A
3141 const CS_CodeDirectory *old_cd, *cd;
3142 CS_CodeDirectory *new_cd;
d9a64523
A
3143 const CS_GenericBlob *entitlements;
3144 vm_offset_t new_blob_addr;
3145 vm_size_t new_blob_size;
3146 vm_size_t new_cdsize;
0a7de745 3147 int error;
d9a64523 3148
0a7de745 3149 uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
d9a64523
A
3150
3151 if (cs_debug > 1) {
3152 printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
0a7de745 3153 (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
d9a64523
A
3154 }
3155
3156 old_cd = blob->csb_cd;
3157
3158 /* Up to the hashes, we can copy all data */
3159 new_cdsize = ntohl(old_cd->hashOffset);
3160 new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
3161
3162 error = ubc_cs_reconstitute_code_signature(blob, new_cdsize,
0a7de745
A
3163 &new_blob_addr, &new_blob_size, &new_cd,
3164 &entitlements);
d9a64523
A
3165 if (error != 0) {
3166 printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
3167 return error;
39037602
A
3168 }
3169
3170 memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3171
3172 /* Update fields in the Code Directory structure */
3173 new_cd->length = htonl((uint32_t)new_cdsize);
3174
3175 uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3176 nCodeSlots >>= hashes_per_new_hash_shift;
3177 new_cd->nCodeSlots = htonl(nCodeSlots);
3178
3179 new_cd->pageSize = PAGE_SHIFT; /* Not byte-swapped */
3180
3181 if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3182 SC_Scatter *scatter = (SC_Scatter*)
0a7de745 3183 ((char *)new_cd + ntohl(new_cd->scatterOffset));
39037602
A
3184 /* iterate all scatter structs to scale their counts */
3185 do {
3186 uint32_t scount = ntohl(scatter->count);
3187 uint32_t sbase = ntohl(scatter->base);
3188
3189 /* last scatter? */
3190 if (scount == 0) {
3191 break;
3192 }
3193
3194 scount >>= hashes_per_new_hash_shift;
3195 scatter->count = htonl(scount);
3196
3197 sbase >>= hashes_per_new_hash_shift;
3198 scatter->base = htonl(sbase);
3199
3200 scatter++;
0a7de745 3201 } while (1);
39037602
A
3202 }
3203
3204 /* For each group of hashes, hash them together */
3205 const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3206 unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3207
3208 uint32_t hash_index;
3209 for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
0a7de745 3210 union cs_hash_union mdctx;
39037602
A
3211
3212 uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3213 const unsigned char *src = src_base + hash_index * source_hash_len;
3214 unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3215
3216 blob->csb_hashtype->cs_init(&mdctx);
3217 blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3218 blob->csb_hashtype->cs_final(dst, &mdctx);
3219 }
3220
d9a64523
A
3221 error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
3222 if (error != 0) {
d9a64523 3223 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
0a7de745 3224 error);
39037602
A
3225
3226 ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
d9a64523 3227 return error;
39037602
A
3228 }
3229
0a7de745 3230 /* New Code Directory is ready for use, swap it out in the blob structure */
39037602
A
3231 ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
3232
3233 blob->csb_mem_size = new_blob_size;
3234 blob->csb_mem_kaddr = new_blob_addr;
3235 blob->csb_cd = cd;
3236 blob->csb_entitlements_blob = entitlements;
3237
3238 /* The blob has some cached attributes of the Code Directory, so update those */
3239
3240 blob->csb_hash_firstlevel_pagesize = blob->csb_hash_pagesize; /* Save the original page size */
3241
3242 blob->csb_hash_pagesize = PAGE_SIZE;
3243 blob->csb_hash_pagemask = PAGE_MASK;
3244 blob->csb_hash_pageshift = PAGE_SHIFT;
3245 blob->csb_end_offset = ntohl(cd->codeLimit);
0a7de745 3246 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
39037602 3247 const SC_Scatter *scatter = (const SC_Scatter*)
0a7de745 3248 ((const char*)cd + ntohl(cd->scatterOffset));
39037602
A
3249 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3250 } else {
3251 blob->csb_start_offset = 0;
3252 }
d9a64523
A
3253
3254 return 0;
593a1d5f 3255}
39236c6e 3256
d9a64523
A
3257/*
3258 * Validate the code signature blob, create a struct cs_blob wrapper
3259 * and return it together with a pointer to the chosen code directory
3260 * and entitlements blob.
3261 *
3262 * Note that this takes ownership of the memory as addr, mainly because
3263 * this function can actually replace the passed in blob with another
3264 * one, e.g. when performing multilevel hashing optimization.
3265 */
2d21ac55 3266int
d9a64523
A
3267cs_blob_create_validated(
3268 vm_address_t * const addr,
3269 vm_size_t size,
3270 struct cs_blob ** const ret_blob,
0a7de745 3271 CS_CodeDirectory const ** const ret_cd)
91447636 3272{
0a7de745
A
3273 struct cs_blob *blob;
3274 int error = EINVAL;
2d21ac55 3275 const CS_CodeDirectory *cd;
39037602 3276 const CS_GenericBlob *entitlements;
0a7de745
A
3277 union cs_hash_union mdctx;
3278 size_t length;
15129b1c 3279
0a7de745
A
3280 if (ret_blob) {
3281 *ret_blob = NULL;
3282 }
2d21ac55 3283
0a7de745 3284 blob = (struct cs_blob *) kalloc(sizeof(struct cs_blob));
2d21ac55
A
3285 if (blob == NULL) {
3286 return ENOMEM;
3287 }
3288
2d21ac55 3289 /* fill in the new blob */
2d21ac55
A
3290 blob->csb_mem_size = size;
3291 blob->csb_mem_offset = 0;
39037602 3292 blob->csb_mem_kaddr = *addr;
39236c6e 3293 blob->csb_flags = 0;
5ba3f43e 3294 blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
fe8ab488 3295 blob->csb_platform_binary = 0;
3e170ce0 3296 blob->csb_platform_path = 0;
fe8ab488 3297 blob->csb_teamid = NULL;
39037602
A
3298 blob->csb_entitlements_blob = NULL;
3299 blob->csb_entitlements = NULL;
d9a64523
A
3300 blob->csb_reconstituted = false;
3301
39037602
A
3302 /* Transfer ownership. Even on error, this function will deallocate */
3303 *addr = 0;
3304
2d21ac55
A
3305 /*
3306 * Validate the blob's contents
3307 */
813fb2f6
A
3308 length = (size_t) size;
3309 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
0a7de745 3310 length, &cd, &entitlements);
39236c6e 3311 if (error) {
0a7de745 3312 if (cs_debug) {
39236c6e 3313 printf("CODESIGNING: csblob invalid: %d\n", error);
0a7de745 3314 }
813fb2f6
A
3315 /*
3316 * The vnode checker can't make the rest of this function
3317 * succeed if csblob validation failed, so bail */
3318 goto out;
2d21ac55 3319 } else {
3e170ce0
A
3320 const unsigned char *md_base;
3321 uint8_t hash[CS_HASH_MAX_SIZE];
3322 int md_size;
3323
490019cf 3324 blob->csb_cd = cd;
39037602 3325 blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
3e170ce0 3326 blob->csb_hashtype = cs_find_md(cd->hashType);
0a7de745 3327 if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
3e170ce0 3328 panic("validated CodeDirectory but unsupported type");
0a7de745 3329 }
39037602
A
3330
3331 blob->csb_hash_pageshift = cd->pageSize;
3332 blob->csb_hash_pagesize = (1U << cd->pageSize);
3333 blob->csb_hash_pagemask = blob->csb_hash_pagesize - 1;
3334 blob->csb_hash_firstlevel_pagesize = 0;
39236c6e 3335 blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
39037602 3336 blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask));
0a7de745 3337 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
39236c6e 3338 const SC_Scatter *scatter = (const SC_Scatter*)
0a7de745 3339 ((const char*)cd + ntohl(cd->scatterOffset));
39037602 3340 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize;
b0d623f7 3341 } else {
3e170ce0 3342 blob->csb_start_offset = 0;
b0d623f7 3343 }
3e170ce0
A
3344 /* compute the blob's cdhash */
3345 md_base = (const unsigned char *) cd;
3346 md_size = ntohl(cd->length);
3347
3348 blob->csb_hashtype->cs_init(&mdctx);
3349 blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
3350 blob->csb_hashtype->cs_final(hash, &mdctx);
3351
3352 memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
2d21ac55
A
3353 }
3354
0a7de745 3355 error = 0;
d9a64523
A
3356
3357out:
0a7de745
A
3358 if (error != 0) {
3359 cs_blob_free(blob);
3360 blob = NULL;
3361 cd = NULL;
3362 }
3363
3364 if (ret_blob != NULL) {
3365 *ret_blob = blob;
3366 }
3367 if (ret_cd != NULL) {
3368 *ret_cd = cd;
3369 }
3370
3371 return error;
d9a64523
A
3372}
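#if 0
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): the ownership contract of cs_blob_create_validated().  The
 * names "raw_sig" and "raw_size" are hypothetical; how the signature
 * buffer is obtained is outside the scope of this sketch.
 */
static int
example_wrap_signature(vm_address_t raw_sig, vm_size_t raw_size)
{
	struct cs_blob *blob = NULL;
	CS_CodeDirectory const *cd = NULL;
	int error;

	/*
	 * The function normally takes ownership of the signature memory:
	 * raw_sig is zeroed, and if validation fails the memory is
	 * released together with the wrapper.
	 */
	error = cs_blob_create_validated(&raw_sig, raw_size, &blob, &cd);
	if (error != 0) {
		return error;
	}

	/* ... inspect blob->csb_cdhash, cd->hashType, ... */

	/*
	 * A wrapper that never gets attached to a vnode is released with
	 * cs_blob_free(), which also frees the signature memory.
	 */
	cs_blob_free(blob);
	return 0;
}
#endif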
3373
3374/*
3375 * Free a cs_blob previously created by cs_blob_create_validated.
3376 */
3377void
3378cs_blob_free(
0a7de745 3379 struct cs_blob * const blob)
d9a64523 3380{
0a7de745
A
3381 if (blob != NULL) {
3382 if (blob->csb_mem_kaddr) {
3383 ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
3384 blob->csb_mem_kaddr = 0;
3385 }
3386 if (blob->csb_entitlements != NULL) {
3387 osobject_release(blob->csb_entitlements);
3388 blob->csb_entitlements = NULL;
3389 }
3390 (kfree)(blob, sizeof(*blob));
3391 }
d9a64523
A
3392}
3393
3394int
3395ubc_cs_blob_add(
0a7de745
A
3396 struct vnode *vp,
3397 cpu_type_t cputype,
3398 off_t base_offset,
3399 vm_address_t *addr,
3400 vm_size_t size,
d9a64523 3401 struct image_params *imgp,
0a7de745
A
3402 __unused int flags,
3403 struct cs_blob **ret_blob)
d9a64523 3404{
0a7de745
A
3405 kern_return_t kr;
3406 struct ubc_info *uip;
3407 struct cs_blob *blob, *oblob;
3408 int error;
d9a64523 3409 CS_CodeDirectory const *cd;
0a7de745
A
3410 off_t blob_start_offset, blob_end_offset;
3411 boolean_t record_mtime;
d9a64523
A
3412
3413 record_mtime = FALSE;
0a7de745
A
3414 if (ret_blob) {
3415 *ret_blob = NULL;
3416 }
3417
3418 /* Create the struct cs_blob wrapper that will be attached to the vnode.
3419 * Validates the passed-in blob in the process. */
3420 error = cs_blob_create_validated(addr, size, &blob, &cd);
3421
3422 if (error != 0) {
d9a64523 3423 printf("malformed code signature blob: %d\n", error);
0a7de745
A
3424 return error;
3425 }
d9a64523 3426
0a7de745 3427 blob->csb_cpu_type = cputype;
d9a64523
A
3428 blob->csb_base_offset = base_offset;
3429
3430 /*
593a1d5f
A
3431 * Let policy module check whether the blob's signature is accepted.
3432 */
3433#if CONFIG_MACF
0a7de745 3434 unsigned int cs_flags = blob->csb_flags;
5ba3f43e
A
3435 unsigned int signer_type = blob->csb_signer_type;
3436 error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
0a7de745 3437 blob->csb_flags = cs_flags;
5ba3f43e 3438 blob->csb_signer_type = signer_type;
39037602 3439
fe8ab488 3440 if (error) {
0a7de745 3441 if (cs_debug) {
fe8ab488 3442 printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
0a7de745 3443 }
593a1d5f 3444 goto out;
fe8ab488 3445 }
39037602 3446 if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
0a7de745 3447 if (cs_debug) {
c18c124e 3448 printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
0a7de745 3449 }
c18c124e
A
3450 error = EPERM;
3451 goto out;
3452 }
5ba3f43e
A
3453#endif
3454
d9a64523
A
3455#if CONFIG_ENFORCE_SIGNED_CODE
3456 /*
3457 * Reconstitute code signature
3458 */
3459 {
3460 vm_address_t new_mem_kaddr = 0;
3461 vm_size_t new_mem_size = 0;
3462
3463 CS_CodeDirectory *new_cd = NULL;
3464 CS_GenericBlob const *new_entitlements = NULL;
3465
3466 error = ubc_cs_reconstitute_code_signature(blob, 0,
0a7de745
A
3467 &new_mem_kaddr, &new_mem_size,
3468 &new_cd, &new_entitlements);
d9a64523
A
3469
3470 if (error != 0) {
3471 printf("failed code signature reconstitution: %d\n", error);
3472 goto out;
3473 }
3474
3475 ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
3476
3477 blob->csb_mem_kaddr = new_mem_kaddr;
3478 blob->csb_mem_size = new_mem_size;
3479 blob->csb_cd = new_cd;
3480 blob->csb_entitlements_blob = new_entitlements;
3481 blob->csb_reconstituted = true;
3482 }
3483
3484#endif
3485
3486
39037602 3487 if (blob->csb_flags & CS_PLATFORM_BINARY) {
0a7de745 3488 if (cs_debug > 1) {
fe8ab488 3489 printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
0a7de745 3490 }
fe8ab488 3491 blob->csb_platform_binary = 1;
39037602 3492 blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
fe8ab488
A
3493 } else {
3494 blob->csb_platform_binary = 0;
3e170ce0
A
3495 blob->csb_platform_path = 0;
3496 blob->csb_teamid = csblob_parse_teamid(blob);
fe8ab488 3497 if (cs_debug > 1) {
0a7de745 3498 if (blob->csb_teamid) {
fe8ab488 3499 printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
0a7de745 3500 } else {
fe8ab488 3501 printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
0a7de745 3502 }
fe8ab488
A
3503 }
3504 }
39037602 3505
2d21ac55
A
3506 /*
3507 * Validate the blob's coverage
3508 */
3509 blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
3510 blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
3511
cf7d32b8
A
3512 if (blob_start_offset >= blob_end_offset ||
3513 blob_start_offset < 0 ||
3514 blob_end_offset <= 0) {
2d21ac55
A
3515 /* reject empty or backwards blob */
3516 error = EINVAL;
3517 goto out;
3518 }
3519
39037602 3520 if (ubc_cs_supports_multilevel_hash(blob)) {
d9a64523
A
3521 error = ubc_cs_convert_to_multilevel_hash(blob);
3522 if (error != 0) {
3523 printf("failed multilevel hash conversion: %d\n", error);
3524 goto out;
3525 }
3526 blob->csb_reconstituted = true;
39037602
A
3527 }
3528
2d21ac55 3529 vnode_lock(vp);
0a7de745 3530 if (!UBCINFOEXISTS(vp)) {
2d21ac55
A
3531 vnode_unlock(vp);
3532 error = ENOENT;
3533 goto out;
3534 }
3535 uip = vp->v_ubcinfo;
3536
3537 /* check if this new blob overlaps with an existing blob */
3538 for (oblob = uip->cs_blobs;
0a7de745
A
3539 oblob != NULL;
3540 oblob = oblob->csb_next) {
3541 off_t oblob_start_offset, oblob_end_offset;
3542
3543 if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
3544 vnode_unlock(vp);
3545 error = EALREADY;
3546 goto out;
3547 } else if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
3548 if (!oblob->csb_platform_binary) {
3549 vnode_unlock(vp);
3550 error = EALREADY;
3551 goto out;
3552 }
3553 } else if (blob->csb_teamid) { // team ID needs to be the same for all slices
3554 if (oblob->csb_platform_binary ||
fe8ab488
A
3555 oblob->csb_teamid == NULL ||
3556 strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
3557 vnode_unlock(vp);
3558 error = EALREADY;
3559 goto out;
3560 }
0a7de745
A
3561 } else { // a binary with no team ID must match other non-platform, team-ID-less slices
3562 if (oblob->csb_platform_binary ||
3563 oblob->csb_teamid != NULL) {
fe8ab488
A
3564 vnode_unlock(vp);
3565 error = EALREADY;
3566 goto out;
3567 }
0a7de745 3568 }
2d21ac55 3569
0a7de745
A
3570 oblob_start_offset = (oblob->csb_base_offset +
3571 oblob->csb_start_offset);
3572 oblob_end_offset = (oblob->csb_base_offset +
3573 oblob->csb_end_offset);
3574 if (blob_start_offset >= oblob_end_offset ||
3575 blob_end_offset <= oblob_start_offset) {
3576 /* no conflict with this existing blob */
3577 } else {
3578 /* conflict ! */
3579 if (blob_start_offset == oblob_start_offset &&
3580 blob_end_offset == oblob_end_offset &&
3581 blob->csb_mem_size == oblob->csb_mem_size &&
3582 blob->csb_flags == oblob->csb_flags &&
3583 (blob->csb_cpu_type == CPU_TYPE_ANY ||
3584 oblob->csb_cpu_type == CPU_TYPE_ANY ||
3585 blob->csb_cpu_type == oblob->csb_cpu_type) &&
3586 !bcmp(blob->csb_cdhash,
3587 oblob->csb_cdhash,
3588 CS_CDHASH_LEN)) {
3589 /*
3590 * We already have this blob:
3591 * we'll return success but
3592 * throw away the new blob.
3593 */
3594 if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
3595 /*
3596 * The old blob matches this one
3597 * but doesn't have any CPU type.
3598 * Update it with whatever the caller
3599 * provided this time.
3600 */
3601 oblob->csb_cpu_type = cputype;
3602 }
3603
3604 /* The signature is still accepted, so update the
3605 * generation count. */
3606 uip->cs_add_gen = cs_blob_generation_count;
3607
3608 vnode_unlock(vp);
3609 if (ret_blob) {
3610 *ret_blob = oblob;
3611 }
3612 error = EAGAIN;
3613 goto out;
3614 } else {
3615 /* different blob: reject the new one */
3616 vnode_unlock(vp);
3617 error = EALREADY;
3618 goto out;
3619 }
3620 }
2d21ac55
A
3621 }
3622
fe8ab488 3623
2d21ac55
A
3624 /* mark this vnode's VM object as having "signed pages" */
3625 kr = memory_object_signed(uip->ui_control, TRUE);
3626 if (kr != KERN_SUCCESS) {
3627 vnode_unlock(vp);
3628 error = ENOENT;
3629 goto out;
3630 }
3631
15129b1c
A
3632 if (uip->cs_blobs == NULL) {
3633 /* loading 1st blob: record the file's current "modify time" */
3634 record_mtime = TRUE;
3635 }
3636
fe8ab488
A
3637 /* set the generation count for cs_blobs */
3638 uip->cs_add_gen = cs_blob_generation_count;
3639
2d21ac55
A
3640 /*
3641 * Add this blob to the list of blobs for this vnode.
3642 * We always add at the front of the list and we never remove a
3643 * blob from the list, so ubc_cs_get_blobs() can return whatever
3644 * the top of the list was and that list will remain valid
3645 * while we validate a page, even after we release the vnode's lock.
3646 */
3647 blob->csb_next = uip->cs_blobs;
3648 uip->cs_blobs = blob;
3649
3650 OSAddAtomic(+1, &cs_blob_count);
3651 if (cs_blob_count > cs_blob_count_peak) {
3652 cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
3653 }
0a7de745 3654 OSAddAtomic((SInt32) + blob->csb_mem_size, &cs_blob_size);
b0d623f7
A
3655 if ((SInt32) cs_blob_size > cs_blob_size_peak) {
3656 cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
2d21ac55 3657 }
b0d623f7
A
3658 if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
3659 cs_blob_size_max = (UInt32) blob->csb_mem_size;
2d21ac55
A
3660 }
3661
c331a0be 3662 if (cs_debug > 1) {
2d21ac55 3663 proc_t p;
39236c6e 3664 const char *name = vnode_getname_printable(vp);
2d21ac55
A
3665 p = current_proc();
3666 printf("CODE SIGNING: proc %d(%s) "
0a7de745
A
3667 "loaded %s signatures for file (%s) "
3668 "range 0x%llx:0x%llx flags 0x%x\n",
3669 p->p_pid, p->p_comm,
3670 blob->csb_cpu_type == -1 ? "detached" : "embedded",
3671 name,
3672 blob->csb_base_offset + blob->csb_start_offset,
3673 blob->csb_base_offset + blob->csb_end_offset,
3674 blob->csb_flags);
39236c6e 3675 vnode_putname_printable(name);
2d21ac55
A
3676 }
3677
2d21ac55
A
3678 vnode_unlock(vp);
3679
15129b1c
A
3680 if (record_mtime) {
3681 vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
3682 }
3683
0a7de745 3684 if (ret_blob) {
3e170ce0 3685 *ret_blob = blob;
0a7de745 3686 }
3e170ce0 3687
0a7de745 3688 error = 0; /* success ! */
2d21ac55
A
3689
3690out:
3691 if (error) {
0a7de745 3692 if (cs_debug) {
fe8ab488 3693 printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);
0a7de745 3694 }
fe8ab488 3695
0a7de745 3696 cs_blob_free(blob);
2d21ac55
A
3697 }
3698
3699 if (error == EAGAIN) {
3700 /*
0a7de745 3701 * See above: error is EAGAIN if we were asked
2d21ac55
A
3702 * to add an existing blob again. We cleaned the new
3703 * blob and we want to return success.
3704 */
3705 error = 0;
2d21ac55
A
3706 }
3707
3708 return error;
91447636
A
3709}
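#if 0
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): how a loader might attach a signature blob to a vnode with
 * ubc_cs_blob_add().  "macho_offset", "sig_addr" and "sig_size" are
 * hypothetical names; error handling around them is elided.
 */
static int
example_add_signature(struct vnode *vp, cpu_type_t cputype,
    off_t macho_offset, vm_address_t sig_addr, vm_size_t sig_size,
    struct image_params *imgp)
{
	struct cs_blob *blob = NULL;
	int error;

	error = ubc_cs_blob_add(vp, cputype, macho_offset,
	    &sig_addr, sig_size, imgp, 0, &blob);
	/*
	 * 0 is returned both when the blob was newly attached and when an
	 * identical blob (same range, cdhash, flags) was already attached;
	 * in the latter case "blob" points at the existing cs_blob.
	 * Ownership of sig_addr is normally transferred regardless of the
	 * outcome, so the caller does not free it here.
	 */
	if (error == 0 && blob != NULL) {
		/* ... record blob->csb_cdhash, blob->csb_flags, ... */
	}
	return error;
}
#endif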
3710
3e170ce0
A
3711void
3712csvnode_print_debug(struct vnode *vp)
3713{
0a7de745
A
3714 const char *name = NULL;
3715 struct ubc_info *uip;
3e170ce0
A
3716 struct cs_blob *blob;
3717
3718 name = vnode_getname_printable(vp);
3719 if (name) {
3720 printf("csvnode: name: %s\n", name);
3721 vnode_putname_printable(name);
3722 }
3723
3724 vnode_lock_spin(vp);
3725
0a7de745 3726 if (!UBCINFOEXISTS(vp)) {
3e170ce0
A
3727 blob = NULL;
3728 goto out;
3729 }
3730
3731 uip = vp->v_ubcinfo;
3732 for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
3733 printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
0a7de745
A
3734 (unsigned long)blob->csb_start_offset,
3735 (unsigned long)blob->csb_end_offset,
3736 blob->csb_flags,
3737 blob->csb_platform_binary ? "yes" : "no",
3738 blob->csb_platform_path ? "yes" : "no",
3739 blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
3e170ce0
A
3740 }
3741
3742out:
3743 vnode_unlock(vp);
3e170ce0
A
3744}
3745
2d21ac55
A
3746struct cs_blob *
3747ubc_cs_blob_get(
0a7de745
A
3748 struct vnode *vp,
3749 cpu_type_t cputype,
3750 off_t offset)
91447636 3751{
0a7de745
A
3752 struct ubc_info *uip;
3753 struct cs_blob *blob;
2d21ac55
A
3754 off_t offset_in_blob;
3755
3756 vnode_lock_spin(vp);
3757
0a7de745 3758 if (!UBCINFOEXISTS(vp)) {
2d21ac55
A
3759 blob = NULL;
3760 goto out;
3761 }
3762
3763 uip = vp->v_ubcinfo;
3764 for (blob = uip->cs_blobs;
0a7de745
A
3765 blob != NULL;
3766 blob = blob->csb_next) {
2d21ac55
A
3767 if (cputype != -1 && blob->csb_cpu_type == cputype) {
3768 break;
3769 }
3770 if (offset != -1) {
3771 offset_in_blob = offset - blob->csb_base_offset;
3772 if (offset_in_blob >= blob->csb_start_offset &&
3773 offset_in_blob < blob->csb_end_offset) {
3774 /* our offset is covered by this blob */
3775 break;
3776 }
3777 }
3778 }
3779
3780out:
3781 vnode_unlock(vp);
3782
3783 return blob;
91447636 3784}
2d21ac55
A
3785
3786static void
3787ubc_cs_free(
0a7de745 3788 struct ubc_info *uip)
91447636 3789{
0a7de745 3790 struct cs_blob *blob, *next_blob;
2d21ac55
A
3791
3792 for (blob = uip->cs_blobs;
0a7de745
A
3793 blob != NULL;
3794 blob = next_blob) {
2d21ac55 3795 next_blob = blob->csb_next;
2d21ac55 3796 OSAddAtomic(-1, &cs_blob_count);
0a7de745 3797 OSAddAtomic((SInt32) - blob->csb_mem_size, &cs_blob_size);
d9a64523 3798 cs_blob_free(blob);
2d21ac55 3799 }
6d2010ae
A
3800#if CHECK_CS_VALIDATION_BITMAP
3801 ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
3802#endif
2d21ac55 3803 uip->cs_blobs = NULL;
91447636 3804}
2d21ac55 3805
fe8ab488
A
3806/* check cs blob generation on vnode
3807 * returns:
3808 * 0 : Success, the cs_blob attached is current
3809 * ENEEDAUTH : Generation count mismatch. Needs authentication again.
3810 */
3811int
3812ubc_cs_generation_check(
0a7de745 3813 struct vnode *vp)
fe8ab488
A
3814{
3815 int retval = ENEEDAUTH;
3816
3817 vnode_lock_spin(vp);
3818
3819 if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
3820 retval = 0;
3821 }
3822
3823 vnode_unlock(vp);
3824 return retval;
3825}
3826
3827int
3828ubc_cs_blob_revalidate(
0a7de745 3829 struct vnode *vp,
c18c124e 3830 struct cs_blob *blob,
39037602
A
3831 struct image_params *imgp,
3832 int flags
fe8ab488
A
3833 )
3834{
3835 int error = 0;
fe8ab488 3836 const CS_CodeDirectory *cd = NULL;
39037602 3837 const CS_GenericBlob *entitlements = NULL;
813fb2f6 3838 size_t size;
fe8ab488
A
3839 assert(vp != NULL);
3840 assert(blob != NULL);
3841
813fb2f6
A
3842 size = blob->csb_mem_size;
3843 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
0a7de745 3844 size, &cd, &entitlements);
fe8ab488
A
3845 if (error) {
3846 if (cs_debug) {
3847 printf("CODESIGNING: csblob invalid: %d\n", error);
3848 }
3849 goto out;
3850 }
3851
0a7de745
A
3852 unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
3853 unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;
d9a64523
A
3854
3855 if (blob->csb_reconstituted) {
3856 /*
3857 * Code signatures that have been modified after validation
3858 * cannot be revalidated inline from their in-memory blob.
3859 *
3860 * That's okay, though, because the only path left that relies
3861 * on revalidation of existing in-memory blobs is the legacy
3862 * detached signature database path, which only exists on macOS,
3863 * which does not do reconstitution of any kind.
3864 */
3865 if (cs_debug) {
3866 printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
3867 }
3868
3869 /*
3870 * EAGAIN tells the caller that they may reread the code
3871 * signature and try attaching it again, which is the same
3872 * thing they would do if there was no cs_blob yet in the
3873 * first place.
3874 *
3875 * Conveniently, after ubc_cs_blob_add did a successful
3876 * validation, it will detect that a matching cs_blob (cdhash,
3877 * offset, arch etc.) already exists, and return success
3878 * without re-adding a cs_blob to the vnode.
3879 */
3880 return EAGAIN;
3881 }
3882
fe8ab488
A
3883 /* callout to mac_vnode_check_signature */
3884#if CONFIG_MACF
5ba3f43e 3885 error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
fe8ab488 3886 if (cs_debug && error) {
0a7de745 3887 printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
fe8ab488 3888 }
39037602
A
3889#else
3890 (void)flags;
5ba3f43e 3891 (void)signer_type;
fe8ab488
A
3892#endif
3893
3894 /* update generation number if success */
3895 vnode_lock_spin(vp);
0a7de745 3896 blob->csb_flags = cs_flags;
5ba3f43e 3897 blob->csb_signer_type = signer_type;
fe8ab488 3898 if (UBCINFOEXISTS(vp)) {
0a7de745 3899 if (error == 0) {
fe8ab488 3900 vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
0a7de745 3901 } else {
fe8ab488 3902 vp->v_ubcinfo->cs_add_gen = 0;
0a7de745 3903 }
fe8ab488
A
3904 }
3905
3906 vnode_unlock(vp);
3907
3908out:
3909 return error;
3910}
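#if 0
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): the intended pairing of ubc_cs_generation_check() and
 * ubc_cs_blob_revalidate().  The loop shape and the zero "flags" value
 * are assumptions about the caller, which is expected to hold a
 * reference (iocount) on the vnode.
 */
static int
example_revalidate_if_needed(struct vnode *vp, struct image_params *imgp)
{
	int error = 0;

	if (ubc_cs_generation_check(vp) == ENEEDAUTH) {
		struct cs_blob *blob;

		/* re-run the MAC policy callout on every attached blob */
		for (blob = ubc_get_cs_blobs(vp);
		    blob != NULL && error == 0;
		    blob = blob->csb_next) {
			error = ubc_cs_blob_revalidate(vp, blob, imgp, 0);
			/*
			 * EAGAIN means the (reconstituted) signature cannot
			 * be revalidated in place and should be re-read and
			 * re-added via ubc_cs_blob_add().
			 */
		}
	}
	return error;
}
#endif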
3911
3912void
3913cs_blob_reset_cache()
3914{
3915 /* incrementing an odd number by 2 makes sure '0' is never reached. */
3916 OSAddAtomic(+2, &cs_blob_generation_count);
3917 printf("Resetting cs_blob cache for all vnodes.\n");
3918}
3919
2d21ac55
A
3920struct cs_blob *
3921ubc_get_cs_blobs(
0a7de745 3922 struct vnode *vp)
91447636 3923{
0a7de745
A
3924 struct ubc_info *uip;
3925 struct cs_blob *blobs;
2d21ac55 3926
b0d623f7
A
3927 /*
3928 * No need to take the vnode lock here. The caller must be holding
3929 * a reference on the vnode (via a VM mapping or open file descriptor),
3930 * so the vnode will not go away. The ubc_info stays until the vnode
3931 * goes away. And we only modify "blobs" by adding to the head of the
3932 * list.
3933 * The ubc_info could go away entirely if the vnode gets reclaimed as
3934 * part of a forced unmount. In the case of a code-signature validation
3935 * during a page fault, the "paging_in_progress" reference on the VM
3936 * object guarantees that the vnode pager (and the ubc_info) won't go
3937 * away during the fault.
3938 * Other callers need to protect against vnode reclaim by holding the
3939 * vnode lock, for example.
3940 */
2d21ac55 3941
0a7de745 3942 if (!UBCINFOEXISTS(vp)) {
2d21ac55
A
3943 blobs = NULL;
3944 goto out;
3945 }
3946
3947 uip = vp->v_ubcinfo;
3948 blobs = uip->cs_blobs;
3949
3950out:
2d21ac55 3951 return blobs;
91447636 3952}
2d21ac55 3953
15129b1c
A
3954void
3955ubc_get_cs_mtime(
0a7de745
A
3956 struct vnode *vp,
3957 struct timespec *cs_mtime)
15129b1c 3958{
0a7de745 3959 struct ubc_info *uip;
15129b1c 3960
0a7de745 3961 if (!UBCINFOEXISTS(vp)) {
15129b1c
A
3962 cs_mtime->tv_sec = 0;
3963 cs_mtime->tv_nsec = 0;
3964 return;
3965 }
3966
3967 uip = vp->v_ubcinfo;
3968 cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
3969 cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
3970}
3971
2d21ac55
A
3972unsigned long cs_validate_page_no_hash = 0;
3973unsigned long cs_validate_page_bad_hash = 0;
39037602
A
3974static boolean_t
3975cs_validate_hash(
0a7de745
A
3976 struct cs_blob *blobs,
3977 memory_object_t pager,
3978 memory_object_offset_t page_offset,
3979 const void *data,
3980 vm_size_t *bytes_processed,
3981 unsigned *tainted)
91447636 3982{
0a7de745
A
3983 union cs_hash_union mdctx;
3984 struct cs_hash const *hashtype = NULL;
3985 unsigned char actual_hash[CS_HASH_MAX_SIZE];
3986 unsigned char expected_hash[CS_HASH_MAX_SIZE];
3987 boolean_t found_hash;
3988 struct cs_blob *blob;
3989 const CS_CodeDirectory *cd;
3990 const unsigned char *hash;
3991 boolean_t validated;
3992 off_t offset; /* page offset in the file */
3993 size_t size;
3994 off_t codeLimit = 0;
3995 const char *lower_bound, *upper_bound;
3996 vm_offset_t kaddr, blob_addr;
2d21ac55
A
3997
3998 /* retrieve the expected hash */
3999 found_hash = FALSE;
2d21ac55
A
4000
4001 for (blob = blobs;
0a7de745
A
4002 blob != NULL;
4003 blob = blob->csb_next) {
2d21ac55
A
4004 offset = page_offset - blob->csb_base_offset;
4005 if (offset < blob->csb_start_offset ||
4006 offset >= blob->csb_end_offset) {
4007 /* our page is not covered by this blob */
4008 continue;
4009 }
4010
39037602 4011 /* blob data has been released */
2d21ac55
A
4012 kaddr = blob->csb_mem_kaddr;
4013 if (kaddr == 0) {
39037602 4014 continue;
2d21ac55 4015 }
39236c6e 4016
2d21ac55 4017 blob_addr = kaddr + blob->csb_mem_offset;
2d21ac55
A
4018 lower_bound = CAST_DOWN(char *, blob_addr);
4019 upper_bound = lower_bound + blob->csb_mem_size;
0a7de745 4020
490019cf 4021 cd = blob->csb_cd;
2d21ac55 4022 if (cd != NULL) {
3e170ce0 4023 /* all CDs that have been injected are already validated */
b0d623f7 4024
3e170ce0 4025 hashtype = blob->csb_hashtype;
0a7de745 4026 if (hashtype == NULL) {
3e170ce0 4027 panic("unknown hash type ?");
0a7de745
A
4028 }
4029 if (hashtype->cs_digest_size > sizeof(actual_hash)) {
3e170ce0 4030 panic("hash size too large");
0a7de745
A
4031 }
4032 if (offset & blob->csb_hash_pagemask) {
39037602 4033 panic("offset not aligned to cshash boundary");
0a7de745 4034 }
3e170ce0 4035
2d21ac55 4036 codeLimit = ntohl(cd->codeLimit);
39236c6e 4037
0a7de745
A
4038 hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
4039 hashtype->cs_size,
4040 lower_bound, upper_bound);
cf7d32b8 4041 if (hash != NULL) {
490019cf 4042 bcopy(hash, expected_hash, hashtype->cs_size);
cf7d32b8
A
4043 found_hash = TRUE;
4044 }
2d21ac55 4045
2d21ac55
A
4046 break;
4047 }
4048 }
4049
4050 if (found_hash == FALSE) {
4051 /*
4052 * We can't verify this page because there is no signature
4053 * for it (yet). It's possible that this part of the object
4054 * is not signed, or that signatures for that part have not
4055 * been loaded yet.
4056 * Report that the page has not been validated and let the
4057 * caller decide if it wants to accept it or not.
4058 */
4059 cs_validate_page_no_hash++;
4060 if (cs_debug > 1) {
4061 printf("CODE SIGNING: cs_validate_page: "
0a7de745
A
4062 "mobj %p off 0x%llx: no hash to validate !?\n",
4063 pager, page_offset);
2d21ac55
A
4064 }
4065 validated = FALSE;
c18c124e 4066 *tainted = 0;
2d21ac55 4067 } else {
c18c124e
A
4068 *tainted = 0;
4069
39037602
A
4070 size = blob->csb_hash_pagesize;
4071 *bytes_processed = size;
4072
fe8ab488 4073 const uint32_t *asha1, *esha1;
b0d623f7 4074 if ((off_t)(offset + size) > codeLimit) {
2d21ac55
A
4075 /* partial page at end of segment */
4076 assert(offset < codeLimit);
39037602 4077 size = (size_t) (codeLimit & blob->csb_hash_pagemask);
c18c124e 4078 *tainted |= CS_VALIDATE_NX;
2d21ac55 4079 }
3e170ce0
A
4080
4081 hashtype->cs_init(&mdctx);
39037602
A
4082
4083 if (blob->csb_hash_firstlevel_pagesize) {
4084 const unsigned char *partial_data = (const unsigned char *)data;
4085 size_t i;
0a7de745
A
4086 for (i = 0; i < size;) {
4087 union cs_hash_union partialctx;
39037602 4088 unsigned char partial_digest[CS_HASH_MAX_SIZE];
0a7de745 4089 size_t partial_size = MIN(size - i, blob->csb_hash_firstlevel_pagesize);
39037602
A
4090
4091 hashtype->cs_init(&partialctx);
4092 hashtype->cs_update(&partialctx, partial_data, partial_size);
4093 hashtype->cs_final(partial_digest, &partialctx);
4094
4095 /* Update cumulative multi-level hash */
4096 hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
4097 partial_data = partial_data + partial_size;
4098 i += partial_size;
4099 }
4100 } else {
4101 hashtype->cs_update(&mdctx, data, size);
4102 }
3e170ce0 4103 hashtype->cs_final(actual_hash, &mdctx);
2d21ac55 4104
fe8ab488
A
4105 asha1 = (const uint32_t *) actual_hash;
4106 esha1 = (const uint32_t *) expected_hash;
4107
490019cf 4108 if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
2d21ac55
A
4109 if (cs_debug) {
4110 printf("CODE SIGNING: cs_validate_page: "
0a7de745
A
4111 "mobj %p off 0x%llx size 0x%lx: "
4112 "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
4113 "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
4114 pager, page_offset, size,
4115 asha1[0], asha1[1], asha1[2],
4116 asha1[3], asha1[4],
4117 esha1[0], esha1[1], esha1[2],
4118 esha1[3], esha1[4]);
2d21ac55
A
4119 }
4120 cs_validate_page_bad_hash++;
c18c124e 4121 *tainted |= CS_VALIDATE_TAINTED;
2d21ac55 4122 } else {
39236c6e 4123 if (cs_debug > 10) {
2d21ac55 4124 printf("CODE SIGNING: cs_validate_page: "
0a7de745
A
4125 "mobj %p off 0x%llx size 0x%lx: "
4126 "SHA1 OK\n",
4127 pager, page_offset, size);
2d21ac55 4128 }
2d21ac55
A
4129 }
4130 validated = TRUE;
4131 }
0a7de745 4132
2d21ac55 4133 return validated;
91447636
A
4134}
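/*
 * Worked example (editor's addition, not part of the original source):
 * with cd->pageSize == 12 the signature page size is 4 KiB, so the page
 * at file offset 0x6000 corresponds to code-directory slot
 * 0x6000 >> 12 == 6.  If codeLimit == 0x6200, only
 * codeLimit & 0xfff == 0x200 bytes of that page are hashed and the
 * remainder of the range is flagged CS_VALIDATE_NX above.
 */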
4135
39037602
A
4136boolean_t
4137cs_validate_range(
0a7de745
A
4138 struct vnode *vp,
4139 memory_object_t pager,
4140 memory_object_offset_t page_offset,
4141 const void *data,
4142 vm_size_t dsize,
4143 unsigned *tainted)
39037602
A
4144{
4145 vm_size_t offset_in_range;
4146 boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */
4147
4148 struct cs_blob *blobs = ubc_get_cs_blobs(vp);
4149
4150 *tainted = 0;
4151
4152 for (offset_in_range = 0;
0a7de745
A
4153 offset_in_range < dsize;
4154 /* offset_in_range updated based on bytes processed */) {
39037602
A
4155 unsigned subrange_tainted = 0;
4156 boolean_t subrange_validated;
4157 vm_size_t bytes_processed = 0;
4158
4159 subrange_validated = cs_validate_hash(blobs,
0a7de745
A
4160 pager,
4161 page_offset + offset_in_range,
4162 (const void *)((const char *)data + offset_in_range),
4163 &bytes_processed,
4164 &subrange_tainted);
39037602
A
4165
4166 *tainted |= subrange_tainted;
4167
4168 if (bytes_processed == 0) {
4169 /* Cannot make forward progress, so return an error */
4170 all_subranges_validated = FALSE;
4171 break;
4172 } else if (subrange_validated == FALSE) {
4173 all_subranges_validated = FALSE;
4174 /* Keep going to detect other types of failures in subranges */
4175 }
4176
4177 offset_in_range += bytes_processed;
4178 }
4179
4180 return all_subranges_validated;
4181}
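#if 0
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): consuming cs_validate_range() and its tainted flags.  "kaddr"
 * and "ksize" are hypothetical names for an already-mapped run of file
 * data starting at file offset "offset".
 */
static void
example_validate_run(struct vnode *vp, memory_object_t pager,
    memory_object_offset_t offset, const void *kaddr, vm_size_t ksize)
{
	unsigned tainted = 0;
	boolean_t valid;

	valid = cs_validate_range(vp, pager, offset, kaddr, ksize, &tainted);

	if (!valid || (tainted & CS_VALIDATE_TAINTED)) {
		/* some signature page did not match its code-directory slot */
	}
	if (tainted & CS_VALIDATE_NX) {
		/* part of the range lies beyond codeLimit and must not be
		 * mapped executable */
	}
}
#endif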
4182
2d21ac55
A
4183int
4184ubc_cs_getcdhash(
0a7de745
A
4185 vnode_t vp,
4186 off_t offset,
4187 unsigned char *cdhash)
2d21ac55 4188{
0a7de745
A
4189 struct cs_blob *blobs, *blob;
4190 off_t rel_offset;
4191 int ret;
b0d623f7
A
4192
4193 vnode_lock(vp);
2d21ac55
A
4194
4195 blobs = ubc_get_cs_blobs(vp);
4196 for (blob = blobs;
0a7de745
A
4197 blob != NULL;
4198 blob = blob->csb_next) {
2d21ac55
A
4199 /* compute offset relative to this blob */
4200 rel_offset = offset - blob->csb_base_offset;
4201 if (rel_offset >= blob->csb_start_offset &&
4202 rel_offset < blob->csb_end_offset) {
4203 /* this blob does cover our "offset" ! */
4204 break;
4205 }
4206 }
4207
4208 if (blob == NULL) {
4209 /* we didn't find a blob covering "offset" */
b0d623f7
A
4210 ret = EBADEXEC; /* XXX any better error ? */
4211 } else {
4212 /* get the cdhash of that blob */
0a7de745 4213 bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
b0d623f7 4214 ret = 0;
2d21ac55
A
4215 }
4216
b0d623f7 4217 vnode_unlock(vp);
2d21ac55 4218
b0d623f7 4219 return ret;
2d21ac55 4220}
6d2010ae 4221
39037602
A
4222boolean_t
4223ubc_cs_is_range_codesigned(
0a7de745
A
4224 vnode_t vp,
4225 mach_vm_offset_t start,
4226 mach_vm_size_t size)
39037602 4227{
0a7de745
A
4228 struct cs_blob *csblob;
4229 mach_vm_offset_t blob_start;
4230 mach_vm_offset_t blob_end;
39037602
A
4231
4232 if (vp == NULL) {
4233 /* no file: no code signature */
4234 return FALSE;
4235 }
4236 if (size == 0) {
4237 /* no range: no code signature */
4238 return FALSE;
4239 }
4240 if (start + size < start) {
4241 /* overflow */
4242 return FALSE;
4243 }
4244
4245 csblob = ubc_cs_blob_get(vp, -1, start);
4246 if (csblob == NULL) {
4247 return FALSE;
4248 }
4249
4250 /*
4251 * We currently check if the range is covered by a single blob,
4252 * which should always be the case for the dyld shared cache.
4253 * If we ever want to make this routine handle other cases, we
4254 * would have to iterate if the blob does not cover the full range.
4255 */
4256 blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
0a7de745 4257 csblob->csb_start_offset);
39037602 4258 blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
0a7de745 4259 csblob->csb_end_offset);
39037602
A
4260 if (blob_start > start || blob_end < (start + size)) {
4261 /* range not fully covered by this code-signing blob */
4262 return FALSE;
4263 }
4264
4265 return TRUE;
4266}
4267
6d2010ae 4268#if CHECK_CS_VALIDATION_BITMAP
0a7de745
A
4269#define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
4270extern boolean_t root_fs_upgrade_try;
6d2010ae
A
4271
4272/*
4273 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
4274 * Depends:
4275 * a) Is the target vnode on the root filesystem?
4276 * b) Has someone tried to mount the root filesystem read-write?
4277 * If answers are (a) yes AND (b) no, then we can use the bitmap.
4278 */
0a7de745 4279#define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
6d2010ae
A
4280kern_return_t
4281ubc_cs_validation_bitmap_allocate(
0a7de745 4282 vnode_t vp)
6d2010ae 4283{
0a7de745 4284 kern_return_t kr = KERN_SUCCESS;
6d2010ae 4285 struct ubc_info *uip;
0a7de745
A
4286 char *target_bitmap;
4287 vm_object_size_t bitmap_size;
6d2010ae 4288
0a7de745 4289 if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
6d2010ae
A
4290 kr = KERN_INVALID_ARGUMENT;
4291 } else {
4292 uip = vp->v_ubcinfo;
4293
0a7de745 4294 if (uip->cs_valid_bitmap == NULL) {
6d2010ae 4295 bitmap_size = stob(uip->ui_size);
0a7de745 4296 target_bitmap = (char*) kalloc((vm_size_t)bitmap_size );
6d2010ae
A
4297 if (target_bitmap == 0) {
4298 kr = KERN_NO_SPACE;
4299 } else {
4300 kr = KERN_SUCCESS;
4301 }
0a7de745 4302 if (kr == KERN_SUCCESS) {
6d2010ae
A
4303 memset( target_bitmap, 0, (size_t)bitmap_size);
4304 uip->cs_valid_bitmap = (void*)target_bitmap;
4305 uip->cs_valid_bitmap_size = bitmap_size;
4306 }
4307 }
4308 }
4309 return kr;
4310}
4311
4312kern_return_t
0a7de745
A
4313ubc_cs_check_validation_bitmap(
4314 vnode_t vp,
4315 memory_object_offset_t offset,
4316 int optype)
6d2010ae 4317{
0a7de745 4318 kern_return_t kr = KERN_SUCCESS;
6d2010ae 4319
0a7de745 4320 if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
6d2010ae
A
4321 kr = KERN_INVALID_ARGUMENT;
4322 } else {
4323 struct ubc_info *uip = vp->v_ubcinfo;
0a7de745 4324 char *target_bitmap = uip->cs_valid_bitmap;
6d2010ae 4325
0a7de745
A
4326 if (target_bitmap == NULL) {
4327 kr = KERN_INVALID_ARGUMENT;
6d2010ae 4328 } else {
0a7de745 4329 uint64_t bit, byte;
6d2010ae
A
4330 bit = atop_64( offset );
4331 byte = bit >> 3;
4332
0a7de745
A
4333 if (byte > uip->cs_valid_bitmap_size) {
4334 kr = KERN_INVALID_ARGUMENT;
6d2010ae 4335 } else {
6d2010ae
A
4336 if (optype == CS_BITMAP_SET) {
4337 target_bitmap[byte] |= (1 << (bit & 07));
4338 kr = KERN_SUCCESS;
4339 } else if (optype == CS_BITMAP_CLEAR) {
4340 target_bitmap[byte] &= ~(1 << (bit & 07));
4341 kr = KERN_SUCCESS;
4342 } else if (optype == CS_BITMAP_CHECK) {
0a7de745 4343 if (target_bitmap[byte] & (1 << (bit & 07))) {
6d2010ae
A
4344 kr = KERN_SUCCESS;
4345 } else {
4346 kr = KERN_FAILURE;
4347 }
4348 }
4349 }
4350 }
4351 }
4352 return kr;
4353}
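#if 0
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): the set / check / clear protocol around the validation bitmap.
 * For scale, stob() maps a 1 MiB file with 4 KiB pages to
 * (256 + 7) >> 3 == 32 bytes of bitmap.
 */
static void
example_bitmap_usage(vnode_t vp, memory_object_offset_t page_offset)
{
	/* lazily allocate; returns KERN_INVALID_ARGUMENT when not applicable */
	(void) ubc_cs_validation_bitmap_allocate(vp);

	/* after a page validates cleanly, remember that fact */
	(void) ubc_cs_check_validation_bitmap(vp, page_offset, CS_BITMAP_SET);

	/* later: skip re-hashing if the bit is still set */
	if (ubc_cs_check_validation_bitmap(vp, page_offset,
	    CS_BITMAP_CHECK) == KERN_SUCCESS) {
		/* page was validated earlier and has not been invalidated */
	}

	/* if the page is modified or discarded, drop the bit */
	(void) ubc_cs_check_validation_bitmap(vp, page_offset, CS_BITMAP_CLEAR);
}
#endif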
4354
4355void
4356ubc_cs_validation_bitmap_deallocate(
0a7de745 4357 vnode_t vp)
6d2010ae
A
4358{
4359 struct ubc_info *uip;
0a7de745
A
4360 void *target_bitmap;
4361 vm_object_size_t bitmap_size;
6d2010ae 4362
0a7de745 4363 if (UBCINFOEXISTS(vp)) {
6d2010ae
A
4364 uip = vp->v_ubcinfo;
4365
0a7de745 4366 if ((target_bitmap = uip->cs_valid_bitmap) != NULL) {
6d2010ae
A
4367 bitmap_size = uip->cs_valid_bitmap_size;
4368 kfree( target_bitmap, (vm_size_t) bitmap_size );
4369 uip->cs_valid_bitmap = NULL;
4370 }
4371 }
4372}
4373#else
0a7de745
A
4374kern_return_t
4375ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
4376{
6d2010ae
A
4377 return KERN_INVALID_ARGUMENT;
4378}
4379
0a7de745
A
4380kern_return_t
4381ubc_cs_check_validation_bitmap(
4382 __unused struct vnode *vp,
6d2010ae 4383 __unused memory_object_offset_t offset,
0a7de745
A
4384 __unused int optype)
4385{
6d2010ae
A
4386 return KERN_INVALID_ARGUMENT;
4387}
4388
0a7de745
A
4389void
4390ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp)
4391{
6d2010ae
A
4392 return;
4393}
4394#endif /* CHECK_CS_VALIDATION_BITMAP */
d9a64523
A
4395
4396#if PMAP_CS
4397kern_return_t
4398cs_associate_blob_with_mapping(
0a7de745
A
4399 void *pmap,
4400 vm_map_offset_t start,
4401 vm_map_size_t size,
4402 vm_object_offset_t offset,
4403 void *blobs_p)
d9a64523 4404{
0a7de745
A
4405 off_t blob_start_offset, blob_end_offset;
4406 kern_return_t kr;
4407 struct cs_blob *blobs, *blob;
4408 vm_offset_t kaddr;
d9a64523
A
4409 struct pmap_cs_code_directory *cd_entry = NULL;
4410
4411 if (!pmap_cs) {
4412 return KERN_NOT_SUPPORTED;
4413 }
0a7de745 4414
d9a64523
A
4415 blobs = (struct cs_blob *)blobs_p;
4416
4417 for (blob = blobs;
0a7de745
A
4418 blob != NULL;
4419 blob = blob->csb_next) {
d9a64523 4420 blob_start_offset = (blob->csb_base_offset +
0a7de745 4421 blob->csb_start_offset);
d9a64523 4422 blob_end_offset = (blob->csb_base_offset +
0a7de745 4423 blob->csb_end_offset);
d9a64523
A
4424 if ((off_t) offset < blob_start_offset ||
4425 (off_t) offset >= blob_end_offset ||
4426 (off_t) (offset + size) <= blob_start_offset ||
4427 (off_t) (offset + size) > blob_end_offset) {
4428 continue;
4429 }
4430 kaddr = blob->csb_mem_kaddr;
4431 if (kaddr == 0) {
4432 /* blob data has been released */
4433 continue;
4434 }
4435 cd_entry = blob->csb_pmap_cs_entry;
4436 if (cd_entry == NULL) {
4437 continue;
4438 }
4439
4440 break;
4441 }
4442
4443 if (cd_entry != NULL) {
4444 kr = pmap_cs_associate(pmap,
0a7de745
A
4445 cd_entry,
4446 start,
4447 size);
d9a64523
A
4448 } else {
4449 kr = KERN_CODESIGN_ERROR;
4450 }
4451#if 00
4452 printf("FBDP %d[%s] pmap_cs_associate(%p,%p,0x%llx,0x%llx) -> kr=0x%x\n", proc_selfpid(), &(current_proc()->p_comm[0]), pmap, cd_entry, (uint64_t)start, (uint64_t)size, kr);
4453 kr = KERN_SUCCESS;
4454#endif
4455 return kr;
4456}
4457#endif /* PMAP_CS */