]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/ubc_subr.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / bsd / kern / ubc_subr.c
CommitLineData
1c79356b 1/*
f427ee49 2 * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
0a7de745 28/*
1c79356b
A
29 * File: ubc_subr.c
30 * Author: Umesh Vaishampayan [umeshv@apple.com]
31 * 05-Aug-1999 umeshv Created.
32 *
33 * Functions related to Unified Buffer cache.
34 *
0b4e3aa0
A
35 * Caller of UBC functions MUST have a valid reference on the vnode.
36 *
0a7de745 37 */
1c79356b 38
1c79356b
A
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/lock.h>
91447636
A
43#include <sys/mman.h>
44#include <sys/mount_internal.h>
45#include <sys/vnode_internal.h>
46#include <sys/ubc_internal.h>
1c79356b 47#include <sys/ucred.h>
91447636
A
48#include <sys/proc_internal.h>
49#include <sys/kauth.h>
1c79356b 50#include <sys/buf.h>
13fec989 51#include <sys/user.h>
2d21ac55 52#include <sys/codesign.h>
fe8ab488
A
53#include <sys/codedir_internal.h>
54#include <sys/fsevents.h>
c18c124e 55#include <sys/fcntl.h>
f427ee49 56#include <sys/reboot.h>
1c79356b
A
57
58#include <mach/mach_types.h>
59#include <mach/memory_object_types.h>
91447636
A
60#include <mach/memory_object_control.h>
61#include <mach/vm_map.h>
b0d623f7 62#include <mach/mach_vm.h>
91447636 63#include <mach/upl.h>
1c79356b 64
91447636 65#include <kern/kern_types.h>
2d21ac55 66#include <kern/kalloc.h>
1c79356b 67#include <kern/zalloc.h>
13fec989 68#include <kern/thread.h>
5ba3f43e 69#include <vm/pmap.h>
91447636
A
70#include <vm/vm_kern.h>
71#include <vm/vm_protos.h> /* last */
1c79356b 72
2d21ac55 73#include <libkern/crypto/sha1.h>
3e170ce0 74#include <libkern/crypto/sha2.h>
39236c6e 75#include <libkern/libkern.h>
f427ee49 76#include <libkern/ptrauth_utils.h>
39236c6e 77
593a1d5f 78#include <security/mac_framework.h>
fe8ab488 79#include <stdbool.h>
f427ee49 80#include <stdatomic.h>
593a1d5f 81
2d21ac55
A
82/* XXX These should be in a BSD accessible Mach header, but aren't. */
83extern kern_return_t memory_object_pages_resident(memory_object_control_t,
0a7de745
A
84 boolean_t *);
85extern kern_return_t memory_object_signed(memory_object_control_t control,
86 boolean_t is_signed);
87extern boolean_t memory_object_is_signed(memory_object_control_t);
cb323159
A
88extern void memory_object_mark_trusted(
89 memory_object_control_t control);
6d2010ae 90
d9a64523
A
91/* XXX Same for those. */
92
2d21ac55
A
93extern void Debugger(const char *message);
94
95
96/* XXX no one uses this interface! */
97kern_return_t ubc_page_op_with_control(
0a7de745
A
98 memory_object_control_t control,
99 off_t f_offset,
100 int ops,
101 ppnum_t *phys_entryp,
102 int *flagsp);
2d21ac55
A
103
104
1c79356b
A
105#if DIAGNOSTIC
106#if defined(assert)
b0d623f7 107#undef assert
1c79356b
A
108#endif
109#define assert(cond) \
2d21ac55 110 ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
1c79356b
A
111#else
112#include <kern/assert.h>
113#endif /* DIAGNOSTIC */
114
2d21ac55 115static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
0c530ab8 116static int ubc_umcallback(vnode_t, void *);
0c530ab8 117static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
2d21ac55 118static void ubc_cs_free(struct ubc_info *uip);
b4c24cb9 119
39037602 120static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
d9a64523 121static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
39037602 122
f427ee49
A
123ZONE_DECLARE(ubc_info_zone, "ubc_info zone", sizeof(struct ubc_info),
124 ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
0a7de745 125static uint32_t cs_blob_generation_count = 1;
2d21ac55
A
126
127/*
128 * CODESIGNING
129 * Routines to navigate code signing data structures in the kernel...
130 */
b0d623f7
A
131
132extern int cs_debug;
133
0a7de745 134#define PAGE_SHIFT_4K (12)
fe8ab488 135
2d21ac55
A
136static boolean_t
137cs_valid_range(
138 const void *start,
139 const void *end,
140 const void *lower_bound,
141 const void *upper_bound)
142{
143 if (upper_bound < lower_bound ||
144 end < start) {
145 return FALSE;
146 }
147
148 if (start < lower_bound ||
149 end > upper_bound) {
150 return FALSE;
151 }
152
153 return TRUE;
154}
155
3e170ce0
A
156typedef void (*cs_md_init)(void *ctx);
157typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
158typedef void (*cs_md_final)(void *hash, void *ctx);
159
160struct cs_hash {
0a7de745
A
161 uint8_t cs_type; /* type code as per code signing */
162 size_t cs_size; /* size of effective hash (may be truncated) */
163 size_t cs_digest_size;/* size of native hash */
164 cs_md_init cs_init;
165 cs_md_update cs_update;
166 cs_md_final cs_final;
3e170ce0
A
167};
168
0a7de745
A
169uint8_t
170cs_hash_type(
171 struct cs_hash const * const cs_hash)
5ba3f43e 172{
0a7de745 173 return cs_hash->cs_type;
5ba3f43e
A
174}
175
d190cdc3 176static const struct cs_hash cs_hash_sha1 = {
0a7de745
A
177 .cs_type = CS_HASHTYPE_SHA1,
178 .cs_size = CS_SHA1_LEN,
179 .cs_digest_size = SHA_DIGEST_LENGTH,
180 .cs_init = (cs_md_init)SHA1Init,
181 .cs_update = (cs_md_update)SHA1Update,
182 .cs_final = (cs_md_final)SHA1Final,
3e170ce0
A
183};
184#if CRYPTO_SHA2
d190cdc3 185static const struct cs_hash cs_hash_sha256 = {
0a7de745
A
186 .cs_type = CS_HASHTYPE_SHA256,
187 .cs_size = SHA256_DIGEST_LENGTH,
188 .cs_digest_size = SHA256_DIGEST_LENGTH,
189 .cs_init = (cs_md_init)SHA256_Init,
190 .cs_update = (cs_md_update)SHA256_Update,
191 .cs_final = (cs_md_final)SHA256_Final,
3e170ce0 192};
d190cdc3 193static const struct cs_hash cs_hash_sha256_truncate = {
0a7de745
A
194 .cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
195 .cs_size = CS_SHA256_TRUNCATED_LEN,
196 .cs_digest_size = SHA256_DIGEST_LENGTH,
197 .cs_init = (cs_md_init)SHA256_Init,
198 .cs_update = (cs_md_update)SHA256_Update,
199 .cs_final = (cs_md_final)SHA256_Final,
3e170ce0 200};
d190cdc3 201static const struct cs_hash cs_hash_sha384 = {
0a7de745
A
202 .cs_type = CS_HASHTYPE_SHA384,
203 .cs_size = SHA384_DIGEST_LENGTH,
204 .cs_digest_size = SHA384_DIGEST_LENGTH,
205 .cs_init = (cs_md_init)SHA384_Init,
206 .cs_update = (cs_md_update)SHA384_Update,
207 .cs_final = (cs_md_final)SHA384_Final,
490019cf 208};
3e170ce0 209#endif
39037602 210
d190cdc3 211static struct cs_hash const *
3e170ce0
A
212cs_find_md(uint8_t type)
213{
214 if (type == CS_HASHTYPE_SHA1) {
215 return &cs_hash_sha1;
216#if CRYPTO_SHA2
217 } else if (type == CS_HASHTYPE_SHA256) {
218 return &cs_hash_sha256;
219 } else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
220 return &cs_hash_sha256_truncate;
490019cf
A
221 } else if (type == CS_HASHTYPE_SHA384) {
222 return &cs_hash_sha384;
3e170ce0
A
223#endif
224 }
225 return NULL;
226}
227
228union cs_hash_union {
0a7de745
A
229 SHA1_CTX sha1ctxt;
230 SHA256_CTX sha256ctx;
231 SHA384_CTX sha384ctx;
3e170ce0
A
232};
233
234
2d21ac55 235/*
490019cf
A
236 * Choose among different hash algorithms.
237 * Higher is better, 0 => don't use at all.
2d21ac55 238 */
d190cdc3 239static const uint32_t hashPriorities[] = {
490019cf
A
240 CS_HASHTYPE_SHA1,
241 CS_HASHTYPE_SHA256_TRUNCATED,
242 CS_HASHTYPE_SHA256,
243 CS_HASHTYPE_SHA384,
244};
b0d623f7 245
490019cf
A
246static unsigned int
247hash_rank(const CS_CodeDirectory *cd)
248{
249 uint32_t type = cd->hashType;
250 unsigned int n;
2d21ac55 251
0a7de745
A
252 for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
253 if (hashPriorities[n] == type) {
490019cf 254 return n + 1;
0a7de745
A
255 }
256 }
257 return 0; /* not supported */
2d21ac55
A
258}
259
260
261/*
262 * Locating a page hash
263 */
264static const unsigned char *
265hashes(
266 const CS_CodeDirectory *cd,
3e170ce0
A
267 uint32_t page,
268 size_t hash_len,
269 const char *lower_bound,
270 const char *upper_bound)
2d21ac55
A
271{
272 const unsigned char *base, *top, *hash;
b0d623f7 273 uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
2d21ac55
A
274
275 assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
276
0a7de745 277 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
b0d623f7 278 /* Get first scatter struct */
39236c6e 279 const SC_Scatter *scatter = (const SC_Scatter*)
0a7de745
A
280 ((const char*)cd + ntohl(cd->scatterOffset));
281 uint32_t hashindex = 0, scount, sbase = 0;
b0d623f7
A
282 /* iterate all scatter structs */
283 do {
0a7de745
A
284 if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
285 if (cs_debug) {
b0d623f7
A
286 printf("CODE SIGNING: Scatter extends past Code Directory\n");
287 }
288 return NULL;
289 }
0a7de745 290
b0d623f7
A
291 scount = ntohl(scatter->count);
292 uint32_t new_base = ntohl(scatter->base);
293
294 /* last scatter? */
295 if (scount == 0) {
296 return NULL;
297 }
0a7de745
A
298
299 if ((hashindex > 0) && (new_base <= sbase)) {
300 if (cs_debug) {
b0d623f7 301 printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
0a7de745 302 sbase, new_base);
b0d623f7 303 }
0a7de745 304 return NULL; /* unordered scatter array */
b0d623f7
A
305 }
306 sbase = new_base;
307
308 /* this scatter beyond page we're looking for? */
309 if (sbase > page) {
310 return NULL;
311 }
0a7de745
A
312
313 if (sbase + scount >= page) {
314 /* Found the scatter struct that is
b0d623f7
A
315 * referencing our page */
316
317 /* base = address of first hash covered by scatter */
0a7de745
A
318 base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
319 hashindex * hash_len;
b0d623f7 320 /* top = address of first hash after this scatter */
3e170ce0 321 top = base + scount * hash_len;
0a7de745
A
322 if (!cs_valid_range(base, top, lower_bound,
323 upper_bound) ||
b0d623f7
A
324 hashindex > nCodeSlots) {
325 return NULL;
326 }
0a7de745 327
b0d623f7
A
328 break;
329 }
0a7de745
A
330
331 /* this scatter struct is before the page we're looking
b0d623f7 332 * for. Iterate. */
0a7de745 333 hashindex += scount;
b0d623f7 334 scatter++;
0a7de745
A
335 } while (1);
336
3e170ce0 337 hash = base + (page - sbase) * hash_len;
b0d623f7
A
338 } else {
339 base = (const unsigned char *)cd + ntohl(cd->hashOffset);
3e170ce0 340 top = base + nCodeSlots * hash_len;
b0d623f7
A
341 if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
342 page > nCodeSlots) {
343 return NULL;
344 }
345 assert(page < nCodeSlots);
2d21ac55 346
3e170ce0 347 hash = base + page * hash_len;
b0d623f7 348 }
0a7de745 349
3e170ce0 350 if (!cs_valid_range(hash, hash + hash_len,
0a7de745 351 lower_bound, upper_bound)) {
2d21ac55
A
352 hash = NULL;
353 }
354
355 return hash;
356}
39236c6e
A
357
358/*
359 * cs_validate_codedirectory
360 *
361 * Validate that pointers inside the code directory to make sure that
362 * all offsets and lengths are constrained within the buffer.
363 *
364 * Parameters: cd Pointer to code directory buffer
365 * length Length of buffer
366 *
367 * Returns: 0 Success
368 * EBADEXEC Invalid code signature
369 */
370
371static int
372cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
373{
d190cdc3 374 struct cs_hash const *hashtype;
39236c6e 375
0a7de745 376 if (length < sizeof(*cd)) {
39236c6e 377 return EBADEXEC;
0a7de745
A
378 }
379 if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
39236c6e 380 return EBADEXEC;
0a7de745
A
381 }
382 if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT) {
39236c6e 383 return EBADEXEC;
0a7de745 384 }
3e170ce0 385 hashtype = cs_find_md(cd->hashType);
0a7de745 386 if (hashtype == NULL) {
39236c6e 387 return EBADEXEC;
0a7de745 388 }
39236c6e 389
0a7de745 390 if (cd->hashSize != hashtype->cs_size) {
3e170ce0 391 return EBADEXEC;
0a7de745 392 }
3e170ce0 393
0a7de745 394 if (length < ntohl(cd->hashOffset)) {
39236c6e 395 return EBADEXEC;
0a7de745 396 }
39236c6e
A
397
398 /* check that nSpecialSlots fits in the buffer in front of hashOffset */
0a7de745 399 if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
39236c6e 400 return EBADEXEC;
0a7de745 401 }
39236c6e
A
402
403 /* check that codeslots fits in the buffer */
0a7de745 404 if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
39236c6e 405 return EBADEXEC;
0a7de745 406 }
39236c6e 407
0a7de745
A
408 if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
409 if (length < ntohl(cd->scatterOffset)) {
39236c6e 410 return EBADEXEC;
0a7de745 411 }
39236c6e 412
3e170ce0 413 const SC_Scatter *scatter = (const SC_Scatter *)
0a7de745 414 (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
39236c6e
A
415 uint32_t nPages = 0;
416
417 /*
418 * Check each scatter buffer, since we don't know the
419 * length of the scatter buffer array, we have to
420 * check each entry.
421 */
0a7de745 422 while (1) {
39236c6e 423 /* check that the end of each scatter buffer in within the length */
0a7de745 424 if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
39236c6e 425 return EBADEXEC;
0a7de745 426 }
39236c6e 427 uint32_t scount = ntohl(scatter->count);
0a7de745 428 if (scount == 0) {
39236c6e 429 break;
0a7de745
A
430 }
431 if (nPages + scount < nPages) {
39236c6e 432 return EBADEXEC;
0a7de745 433 }
39236c6e
A
434 nPages += scount;
435 scatter++;
436
437 /* XXX check that basees doesn't overlap */
438 /* XXX check that targetOffset doesn't overlap */
439 }
440#if 0 /* rdar://12579439 */
0a7de745 441 if (nPages != ntohl(cd->nCodeSlots)) {
39236c6e 442 return EBADEXEC;
0a7de745 443 }
39236c6e
A
444#endif
445 }
446
0a7de745 447 if (length < ntohl(cd->identOffset)) {
39236c6e 448 return EBADEXEC;
0a7de745 449 }
39236c6e
A
450
451 /* identifier is NUL terminated string */
452 if (cd->identOffset) {
3e170ce0 453 const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
0a7de745 454 if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
39236c6e 455 return EBADEXEC;
0a7de745 456 }
39236c6e
A
457 }
458
fe8ab488
A
459 /* team identifier is NULL terminated string */
460 if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
0a7de745 461 if (length < ntohl(cd->teamOffset)) {
fe8ab488 462 return EBADEXEC;
0a7de745 463 }
fe8ab488 464
3e170ce0 465 const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
0a7de745 466 if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
fe8ab488 467 return EBADEXEC;
0a7de745 468 }
fe8ab488
A
469 }
470
f427ee49
A
471 /* linkage is variable length binary data */
472 if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0) {
473 const uintptr_t ptr = (uintptr_t)cd + ntohl(cd->linkageOffset);
474 const uintptr_t ptr_end = ptr + ntohl(cd->linkageSize);
475
476 if (ptr_end < ptr || ptr < (uintptr_t)cd || ptr_end > (uintptr_t)cd + length) {
477 return EBADEXEC;
478 }
479 }
480
481
39236c6e
A
482 return 0;
483}
484
485/*
486 *
487 */
488
489static int
490cs_validate_blob(const CS_GenericBlob *blob, size_t length)
491{
0a7de745 492 if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
39236c6e 493 return EBADEXEC;
0a7de745 494 }
39236c6e
A
495 return 0;
496}
497
498/*
499 * cs_validate_csblob
500 *
501 * Validate that superblob/embedded code directory to make sure that
502 * all internal pointers are valid.
503 *
504 * Will validate both a superblob csblob and a "raw" code directory.
505 *
506 *
507 * Parameters: buffer Pointer to code signature
508 * length Length of buffer
509 * rcd returns pointer to code directory
510 *
511 * Returns: 0 Success
512 * EBADEXEC Invalid code signature
513 */
514
515static int
813fb2f6
A
516cs_validate_csblob(
517 const uint8_t *addr,
d9a64523 518 const size_t blob_size,
813fb2f6
A
519 const CS_CodeDirectory **rcd,
520 const CS_GenericBlob **rentitlements)
39236c6e 521{
813fb2f6 522 const CS_GenericBlob *blob;
39236c6e 523 int error;
d9a64523 524 size_t length;
39236c6e
A
525
526 *rcd = NULL;
39037602 527 *rentitlements = NULL;
39236c6e 528
813fb2f6 529 blob = (const CS_GenericBlob *)(const void *)addr;
813fb2f6
A
530
531 length = blob_size;
39236c6e 532 error = cs_validate_blob(blob, length);
0a7de745 533 if (error) {
39236c6e 534 return error;
0a7de745 535 }
39236c6e
A
536 length = ntohl(blob->length);
537
538 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
490019cf
A
539 const CS_SuperBlob *sb;
540 uint32_t n, count;
541 const CS_CodeDirectory *best_cd = NULL;
542 unsigned int best_rank = 0;
5ba3f43e
A
543#if PLATFORM_WatchOS
544 const CS_CodeDirectory *sha1_cd = NULL;
545#endif
39236c6e 546
0a7de745 547 if (length < sizeof(CS_SuperBlob)) {
39236c6e 548 return EBADEXEC;
0a7de745 549 }
39236c6e 550
490019cf
A
551 sb = (const CS_SuperBlob *)blob;
552 count = ntohl(sb->count);
553
39236c6e 554 /* check that the array of BlobIndex fits in the rest of the data */
0a7de745 555 if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
39236c6e 556 return EBADEXEC;
0a7de745 557 }
39236c6e
A
558
559 /* now check each BlobIndex */
560 for (n = 0; n < count; n++) {
561 const CS_BlobIndex *blobIndex = &sb->index[n];
490019cf
A
562 uint32_t type = ntohl(blobIndex->type);
563 uint32_t offset = ntohl(blobIndex->offset);
0a7de745 564 if (length < offset) {
39236c6e 565 return EBADEXEC;
0a7de745 566 }
39236c6e
A
567
568 const CS_GenericBlob *subBlob =
0a7de745 569 (const CS_GenericBlob *)(const void *)(addr + offset);
39236c6e 570
490019cf 571 size_t subLength = length - offset;
39236c6e 572
0a7de745 573 if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
39236c6e 574 return error;
0a7de745 575 }
39236c6e
A
576 subLength = ntohl(subBlob->length);
577
578 /* extra validation for CDs, that is also returned */
490019cf
A
579 if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
580 const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
0a7de745 581 if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
39236c6e 582 return error;
0a7de745 583 }
490019cf 584 unsigned int rank = hash_rank(candidate);
0a7de745 585 if (cs_debug > 3) {
490019cf 586 printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
0a7de745 587 }
490019cf
A
588 if (best_cd == NULL || rank > best_rank) {
589 best_cd = candidate;
590 best_rank = rank;
39037602 591
0a7de745 592 if (cs_debug > 2) {
39037602 593 printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
0a7de745 594 }
39037602 595 *rcd = best_cd;
490019cf
A
596 } else if (best_cd != NULL && rank == best_rank) {
597 /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
39037602
A
598 printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
599 return EBADEXEC;
600 }
5ba3f43e
A
601#if PLATFORM_WatchOS
602 if (candidate->hashType == CS_HASHTYPE_SHA1) {
603 if (sha1_cd != NULL) {
604 printf("multiple sha1 CodeDirectories in signature; rejecting\n");
605 return EBADEXEC;
606 }
607 sha1_cd = candidate;
608 }
609#endif
39037602
A
610 } else if (type == CSSLOT_ENTITLEMENTS) {
611 if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
612 return EBADEXEC;
613 }
614 if (*rentitlements != NULL) {
615 printf("multiple entitlements blobs\n");
490019cf
A
616 return EBADEXEC;
617 }
39037602 618 *rentitlements = subBlob;
39236c6e
A
619 }
620 }
621
5ba3f43e
A
622#if PLATFORM_WatchOS
623 /* To keep watchOS fast enough, we have to resort to sha1 for
624 * some code.
625 *
626 * At the time of writing this comment, known sha1 attacks are
627 * collision attacks (not preimage or second preimage
628 * attacks), which do not apply to platform binaries since
629 * they have a fixed hash in the trust cache. Given this
630 * property, we only prefer sha1 code directories for adhoc
631 * signatures, which always have to be in a trust cache to be
632 * valid (can-load-cdhash does not exist for watchOS). Those
633 * are, incidentally, also the platform binaries, for which we
634 * care about the performance hit that sha256 would bring us.
635 *
636 * Platform binaries may still contain a (not chosen) sha256
637 * code directory, which keeps software updates that switch to
638 * sha256-only small.
639 */
640
641 if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
642 if (sha1_cd->flags != (*rcd)->flags) {
643 printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
0a7de745 644 (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
5ba3f43e
A
645 *rcd = NULL;
646 return EBADEXEC;
647 }
648
649 *rcd = sha1_cd;
650 }
651#endif
39236c6e 652 } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
0a7de745 653 if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
39236c6e 654 return error;
0a7de745 655 }
39236c6e
A
656 *rcd = (const CS_CodeDirectory *)blob;
657 } else {
658 return EBADEXEC;
659 }
660
0a7de745 661 if (*rcd == NULL) {
39236c6e 662 return EBADEXEC;
0a7de745 663 }
39236c6e
A
664
665 return 0;
666}
667
668/*
669 * cs_find_blob_bytes
670 *
671 * Find an blob from the superblob/code directory. The blob must have
672 * been been validated by cs_validate_csblob() before calling
3e170ce0 673 * this. Use csblob_find_blob() instead.
0a7de745 674 *
39236c6e
A
675 * Will also find a "raw" code directory if its stored as well as
676 * searching the superblob.
677 *
678 * Parameters: buffer Pointer to code signature
679 * length Length of buffer
680 * type type of blob to find
681 * magic the magic number for that blob
682 *
683 * Returns: pointer Success
684 * NULL Buffer not found
685 */
686
3e170ce0
A
687const CS_GenericBlob *
688csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
39236c6e 689{
3e170ce0 690 const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;
39236c6e
A
691
692 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
693 const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
694 size_t n, count = ntohl(sb->count);
695
696 for (n = 0; n < count; n++) {
0a7de745 697 if (ntohl(sb->index[n].type) != type) {
39236c6e 698 continue;
0a7de745 699 }
39236c6e 700 uint32_t offset = ntohl(sb->index[n].offset);
0a7de745 701 if (length - sizeof(const CS_GenericBlob) < offset) {
39236c6e 702 return NULL;
0a7de745 703 }
3e170ce0 704 blob = (const CS_GenericBlob *)(const void *)(addr + offset);
0a7de745 705 if (ntohl(blob->magic) != magic) {
39236c6e 706 continue;
0a7de745 707 }
39236c6e
A
708 return blob;
709 }
710 } else if (type == CSSLOT_CODEDIRECTORY
0a7de745
A
711 && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
712 && magic == CSMAGIC_CODEDIRECTORY) {
39236c6e 713 return blob;
0a7de745 714 }
39236c6e
A
715 return NULL;
716}
717
718
fe8ab488 719const CS_GenericBlob *
3e170ce0 720csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
39236c6e 721{
0a7de745 722 if ((csblob->csb_flags & CS_VALID) == 0) {
39236c6e 723 return NULL;
0a7de745 724 }
3e170ce0 725 return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
39236c6e
A
726}
727
728static const uint8_t *
3e170ce0 729find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
39236c6e
A
730{
731 /* there is no zero special slot since that is the first code slot */
0a7de745 732 if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
39236c6e 733 return NULL;
0a7de745 734 }
39236c6e 735
0a7de745 736 return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
39236c6e
A
737}
738
3e170ce0 739static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
39236c6e 740
6d2010ae 741int
3e170ce0 742csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
6d2010ae 743{
3e170ce0 744 uint8_t computed_hash[CS_HASH_MAX_SIZE];
39236c6e
A
745 const CS_GenericBlob *entitlements;
746 const CS_CodeDirectory *code_dir;
39236c6e 747 const uint8_t *embedded_hash;
3e170ce0 748 union cs_hash_union context;
39236c6e
A
749
750 *out_start = NULL;
751 *out_length = 0;
752
0a7de745
A
753 if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
754 return EBADEXEC;
755 }
39236c6e 756
490019cf 757 code_dir = csblob->csb_cd;
39236c6e 758
39037602
A
759 if ((csblob->csb_flags & CS_VALID) == 0) {
760 entitlements = NULL;
761 } else {
762 entitlements = csblob->csb_entitlements_blob;
763 }
3e170ce0 764 embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);
39236c6e
A
765
766 if (embedded_hash == NULL) {
0a7de745 767 if (entitlements) {
39236c6e 768 return EBADEXEC;
0a7de745 769 }
39236c6e 770 return 0;
490019cf
A
771 } else if (entitlements == NULL) {
772 if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
773 return EBADEXEC;
774 } else {
775 return 0;
776 }
6d2010ae 777 }
39236c6e 778
3e170ce0 779 csblob->csb_hashtype->cs_init(&context);
c3c9b80d
A
780 ptrauth_utils_auth_blob_generic(entitlements,
781 ntohl(entitlements->length),
782 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
783 PTRAUTH_ADDR_DIVERSIFY,
784 csblob->csb_entitlements_blob_signature);
3e170ce0
A
785 csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
786 csblob->csb_hashtype->cs_final(computed_hash, &context);
787
0a7de745 788 if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
39236c6e 789 return EBADEXEC;
0a7de745 790 }
39236c6e 791
3e170ce0 792 *out_start = __DECONST(void *, entitlements);
39236c6e
A
793 *out_length = ntohl(entitlements->length);
794
795 return 0;
796}
797
6d2010ae 798/*
3e170ce0
A
799 * CODESIGNING
800 * End of routines to navigate code signing data structures in the kernel.
6d2010ae
A
801 */
802
803
2d21ac55 804
1c79356b 805/*
2d21ac55
A
806 * ubc_info_init
807 *
808 * Allocate and attach an empty ubc_info structure to a vnode
809 *
810 * Parameters: vp Pointer to the vnode
811 *
812 * Returns: 0 Success
813 * vnode_size:ENOMEM Not enough space
814 * vnode_size:??? Other error from vnode_getattr
815 *
1c79356b
A
816 */
817int
818ubc_info_init(struct vnode *vp)
91447636 819{
0a7de745 820 return ubc_info_init_internal(vp, 0, 0);
91447636 821}
2d21ac55
A
822
823
824/*
825 * ubc_info_init_withsize
826 *
827 * Allocate and attach a sized ubc_info structure to a vnode
828 *
829 * Parameters: vp Pointer to the vnode
830 * filesize The size of the file
831 *
832 * Returns: 0 Success
833 * vnode_size:ENOMEM Not enough space
834 * vnode_size:??? Other error from vnode_getattr
835 */
91447636
A
836int
837ubc_info_init_withsize(struct vnode *vp, off_t filesize)
838{
0a7de745 839 return ubc_info_init_internal(vp, 1, filesize);
91447636
A
840}
841
2d21ac55
A
842
843/*
844 * ubc_info_init_internal
845 *
846 * Allocate and attach a ubc_info structure to a vnode
847 *
848 * Parameters: vp Pointer to the vnode
849 * withfsize{0,1} Zero if the size should be obtained
850 * from the vnode; otherwise, use filesize
851 * filesize The size of the file, if withfsize == 1
852 *
853 * Returns: 0 Success
854 * vnode_size:ENOMEM Not enough space
855 * vnode_size:??? Other error from vnode_getattr
856 *
857 * Notes: We call a blocking zalloc(), and the zone was created as an
858 * expandable and collectable zone, so if no memory is available,
859 * it is possible for zalloc() to block indefinitely. zalloc()
860 * may also panic if the zone of zones is exhausted, since it's
861 * NOT expandable.
862 *
863 * We unconditionally call vnode_pager_setup(), even if this is
864 * a reuse of a ubc_info; in that case, we should probably assert
865 * that it does not already have a pager association, but do not.
866 *
867 * Since memory_object_create_named() can only fail from receiving
868 * an invalid pager argument, the explicit check and panic is
869 * merely precautionary.
870 */
871static int
872ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
1c79356b 873{
0a7de745 874 struct ubc_info *uip;
1c79356b 875 void * pager;
1c79356b
A
876 int error = 0;
877 kern_return_t kret;
0b4e3aa0 878 memory_object_control_t control;
1c79356b 879
91447636 880 uip = vp->v_ubcinfo;
1c79356b 881
2d21ac55
A
882 /*
883 * If there is not already a ubc_info attached to the vnode, we
884 * attach one; otherwise, we will reuse the one that's there.
885 */
91447636 886 if (uip == UBC_INFO_NULL) {
1c79356b 887 uip = (struct ubc_info *) zalloc(ubc_info_zone);
91447636
A
888 bzero((char *)uip, sizeof(struct ubc_info));
889
1c79356b 890 uip->ui_vnode = vp;
91447636 891 uip->ui_flags = UI_INITED;
1c79356b
A
892 uip->ui_ucred = NOCRED;
893 }
1c79356b
A
894 assert(uip->ui_flags != UI_NONE);
895 assert(uip->ui_vnode == vp);
896
1c79356b
A
897 /* now set this ubc_info in the vnode */
898 vp->v_ubcinfo = uip;
91447636 899
2d21ac55
A
900 /*
901 * Allocate a pager object for this vnode
902 *
903 * XXX The value of the pager parameter is currently ignored.
904 * XXX Presumably, this API changed to avoid the race between
905 * XXX setting the pager and the UI_HASPAGER flag.
906 */
1c79356b
A
907 pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
908 assert(pager);
91447636 909
2d21ac55
A
910 /*
911 * Explicitly set the pager into the ubc_info, after setting the
912 * UI_HASPAGER flag.
913 */
91447636
A
914 SET(uip->ui_flags, UI_HASPAGER);
915 uip->ui_pager = pager;
1c79356b
A
916
917 /*
91447636 918 * Note: We can not use VNOP_GETATTR() to get accurate
2d21ac55
A
919 * value of ui_size because this may be an NFS vnode, and
920 * nfs_getattr() can call vinvalbuf(); if this happens,
921 * ubc_info is not set up to deal with that event.
1c79356b
A
922 * So use bogus size.
923 */
924
1c79356b 925 /*
0b4e3aa0
A
926 * create a vnode - vm_object association
927 * memory_object_create_named() creates a "named" reference on the
928 * memory object we hold this reference as long as the vnode is
929 * "alive." Since memory_object_create_named() took its own reference
930 * on the vnode pager we passed it, we can drop the reference
931 * vnode_pager_setup() returned here.
1c79356b 932 */
0b4e3aa0 933 kret = memory_object_create_named(pager,
0a7de745
A
934 (memory_object_size_t)uip->ui_size, &control);
935 vnode_pager_deallocate(pager);
936 if (kret != KERN_SUCCESS) {
0b4e3aa0 937 panic("ubc_info_init: memory_object_create_named returned %d", kret);
0a7de745 938 }
1c79356b 939
0b4e3aa0 940 assert(control);
0a7de745
A
941 uip->ui_control = control; /* cache the value of the mo control */
942 SET(uip->ui_flags, UI_HASOBJREF); /* with a named reference */
2d21ac55 943
91447636 944 if (withfsize == 0) {
91447636 945 /* initialize the size */
2d21ac55 946 error = vnode_size(vp, &uip->ui_size, vfs_context_current());
0a7de745 947 if (error) {
91447636 948 uip->ui_size = 0;
0a7de745 949 }
91447636
A
950 } else {
951 uip->ui_size = filesize;
952 }
0a7de745 953 vp->v_lflag |= VNAMED_UBC; /* vnode has a named ubc reference */
1c79356b 954
0a7de745 955 return error;
1c79356b
A
956}
957
2d21ac55
A
958
959/*
960 * ubc_info_free
961 *
962 * Free a ubc_info structure
963 *
964 * Parameters: uip A pointer to the ubc_info to free
965 *
966 * Returns: (void)
967 *
968 * Notes: If there is a credential that has subsequently been associated
969 * with the ubc_info via a call to ubc_setcred(), the reference
970 * to the credential is dropped.
971 *
972 * It's actually impossible for a ubc_info.ui_control to take the
973 * value MEMORY_OBJECT_CONTROL_NULL.
974 */
0b4e3aa0
A
975static void
976ubc_info_free(struct ubc_info *uip)
1c79356b 977{
0c530ab8
A
978 if (IS_VALID_CRED(uip->ui_ucred)) {
979 kauth_cred_unref(&uip->ui_ucred);
1c79356b 980 }
0b4e3aa0 981
0a7de745 982 if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
0b4e3aa0 983 memory_object_control_deallocate(uip->ui_control);
0a7de745
A
984 }
985
91447636 986 cluster_release(uip);
2d21ac55 987 ubc_cs_free(uip);
0b4e3aa0 988
2d21ac55 989 zfree(ubc_info_zone, uip);
1c79356b
A
990 return;
991}
992
2d21ac55 993
0b4e3aa0
A
994void
995ubc_info_deallocate(struct ubc_info *uip)
996{
0a7de745 997 ubc_info_free(uip);
0b4e3aa0
A
998}
999
0a7de745
A
1000errno_t
1001mach_to_bsd_errno(kern_return_t mach_err)
fe8ab488
A
1002{
1003 switch (mach_err) {
1004 case KERN_SUCCESS:
1005 return 0;
1006
1007 case KERN_INVALID_ADDRESS:
1008 case KERN_INVALID_ARGUMENT:
1009 case KERN_NOT_IN_SET:
1010 case KERN_INVALID_NAME:
1011 case KERN_INVALID_TASK:
1012 case KERN_INVALID_RIGHT:
1013 case KERN_INVALID_VALUE:
1014 case KERN_INVALID_CAPABILITY:
1015 case KERN_INVALID_HOST:
1016 case KERN_MEMORY_PRESENT:
1017 case KERN_INVALID_PROCESSOR_SET:
1018 case KERN_INVALID_POLICY:
1019 case KERN_ALREADY_WAITING:
1020 case KERN_DEFAULT_SET:
1021 case KERN_EXCEPTION_PROTECTED:
1022 case KERN_INVALID_LEDGER:
1023 case KERN_INVALID_MEMORY_CONTROL:
1024 case KERN_INVALID_SECURITY:
1025 case KERN_NOT_DEPRESSED:
1026 case KERN_LOCK_OWNED:
1027 case KERN_LOCK_OWNED_SELF:
1028 return EINVAL;
1029
1030 case KERN_PROTECTION_FAILURE:
1031 case KERN_NOT_RECEIVER:
1032 case KERN_NO_ACCESS:
1033 case KERN_POLICY_STATIC:
1034 return EACCES;
1035
1036 case KERN_NO_SPACE:
1037 case KERN_RESOURCE_SHORTAGE:
1038 case KERN_UREFS_OVERFLOW:
1039 case KERN_INVALID_OBJECT:
1040 return ENOMEM;
1041
1042 case KERN_FAILURE:
1043 return EIO;
1044
1045 case KERN_MEMORY_FAILURE:
1046 case KERN_POLICY_LIMIT:
1047 case KERN_CODESIGN_ERROR:
1048 return EPERM;
1049
1050 case KERN_MEMORY_ERROR:
1051 return EBUSY;
1052
1053 case KERN_ALREADY_IN_SET:
1054 case KERN_NAME_EXISTS:
1055 case KERN_RIGHT_EXISTS:
1056 return EEXIST;
1057
1058 case KERN_ABORTED:
1059 return EINTR;
1060
1061 case KERN_TERMINATED:
1062 case KERN_LOCK_SET_DESTROYED:
1063 case KERN_LOCK_UNSTABLE:
1064 case KERN_SEMAPHORE_DESTROYED:
1065 return ENOENT;
1066
1067 case KERN_RPC_SERVER_TERMINATED:
1068 return ECONNRESET;
1069
1070 case KERN_NOT_SUPPORTED:
1071 return ENOTSUP;
1072
1073 case KERN_NODE_DOWN:
1074 return ENETDOWN;
1075
1076 case KERN_NOT_WAITING:
1077 return ENOENT;
1078
1079 case KERN_OPERATION_TIMED_OUT:
1080 return ETIMEDOUT;
1081
1082 default:
1083 return EIO;
1084 }
1085}
2d21ac55 1086
1c79356b 1087/*
fe8ab488 1088 * ubc_setsize_ex
2d21ac55 1089 *
fe8ab488 1090 * Tell the VM that the the size of the file represented by the vnode has
2d21ac55
A
1091 * changed
1092 *
fe8ab488
A
1093 * Parameters: vp The vp whose backing file size is
1094 * being changed
1095 * nsize The new size of the backing file
1096 * opts Options
1097 *
1098 * Returns: EINVAL for new size < 0
1099 * ENOENT if no UBC info exists
1100 * EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1101 * Other errors (mapped to errno_t) returned by VM functions
1102 *
1103 * Notes: This function will indicate success if the new size is the
1104 * same or larger than the old size (in this case, the
1105 * remainder of the file will require modification or use of
1106 * an existing upl to access successfully).
1107 *
1108 * This function will fail if the new file size is smaller,
1109 * and the memory region being invalidated was unable to
1110 * actually be invalidated and/or the last page could not be
1111 * flushed, if the new size is not aligned to a page
1112 * boundary. This is usually indicative of an I/O error.
1c79356b 1113 */
0a7de745
A
1114errno_t
1115ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
1c79356b 1116{
0a7de745 1117 off_t osize; /* ui_size before change */
1c79356b
A
1118 off_t lastpg, olastpgend, lastoff;
1119 struct ubc_info *uip;
0b4e3aa0 1120 memory_object_control_t control;
2d21ac55 1121 kern_return_t kret = KERN_SUCCESS;
1c79356b 1122
0a7de745 1123 if (nsize < (off_t)0) {
fe8ab488 1124 return EINVAL;
0a7de745 1125 }
1c79356b 1126
0a7de745 1127 if (!UBCINFOEXISTS(vp)) {
fe8ab488 1128 return ENOENT;
0a7de745 1129 }
1c79356b
A
1130
1131 uip = vp->v_ubcinfo;
2d21ac55 1132 osize = uip->ui_size;
fe8ab488 1133
0a7de745 1134 if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
fe8ab488 1135 return EAGAIN;
0a7de745 1136 }
fe8ab488 1137
2d21ac55
A
1138 /*
1139 * Update the size before flushing the VM
1140 */
1c79356b
A
1141 uip->ui_size = nsize;
1142
0a7de745 1143 if (nsize >= osize) { /* Nothing more to do */
6d2010ae
A
1144 if (nsize > osize) {
1145 lock_vnode_and_post(vp, NOTE_EXTEND);
1146 }
1147
fe8ab488 1148 return 0;
b0d623f7 1149 }
1c79356b
A
1150
1151 /*
1152 * When the file shrinks, invalidate the pages beyond the
1153 * new size. Also get rid of garbage beyond nsize on the
2d21ac55
A
1154 * last page. The ui_size already has the nsize, so any
1155 * subsequent page-in will zero-fill the tail properly
1c79356b 1156 */
1c79356b
A
1157 lastpg = trunc_page_64(nsize);
1158 olastpgend = round_page_64(osize);
0b4e3aa0
A
1159 control = uip->ui_control;
1160 assert(control);
1c79356b
A
1161 lastoff = (nsize & PAGE_MASK_64);
1162
2d21ac55 1163 if (lastoff) {
0a7de745
A
1164 upl_t upl;
1165 upl_page_info_t *pl;
2d21ac55 1166
fe8ab488 1167 /*
2d21ac55 1168 * new EOF ends up in the middle of a page
fe8ab488 1169 * zero the tail of this page if it's currently
2d21ac55
A
1170 * present in the cache
1171 */
b226f5e5 1172 kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);
fe8ab488 1173
0a7de745
A
1174 if (kret != KERN_SUCCESS) {
1175 panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);
1176 }
2d21ac55 1177
0a7de745
A
1178 if (upl_valid_page(pl, 0)) {
1179 cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
1180 }
2d21ac55
A
1181
1182 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
1c79356b 1183
2d21ac55
A
1184 lastpg += PAGE_SIZE_64;
1185 }
1186 if (olastpgend > lastpg) {
0a7de745 1187 int flags;
b0d623f7 1188
0a7de745 1189 if (lastpg == 0) {
b0d623f7 1190 flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
0a7de745 1191 } else {
b0d623f7 1192 flags = MEMORY_OBJECT_DATA_FLUSH;
0a7de745 1193 }
fe8ab488 1194 /*
2d21ac55
A
1195 * invalidate the pages beyond the new EOF page
1196 *
1197 */
fe8ab488 1198 kret = memory_object_lock_request(control,
0a7de745
A
1199 (memory_object_offset_t)lastpg,
1200 (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
1201 MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
1202 if (kret != KERN_SUCCESS) {
1203 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
1204 }
2d21ac55 1205 }
fe8ab488 1206 return mach_to_bsd_errno(kret);
1c79356b
A
1207}
1208
fe8ab488 1209// Returns true for success
0a7de745
A
1210int
1211ubc_setsize(vnode_t vp, off_t nsize)
fe8ab488
A
1212{
1213 return ubc_setsize_ex(vp, nsize, 0) == 0;
1214}
2d21ac55 1215
1c79356b 1216/*
2d21ac55
A
1217 * ubc_getsize
1218 *
1219 * Get the size of the file assocated with the specified vnode
1220 *
1221 * Parameters: vp The vnode whose size is of interest
1222 *
1223 * Returns: 0 There is no ubc_info associated with
1224 * this vnode, or the size is zero
1225 * !0 The size of the file
1226 *
1227 * Notes: Using this routine, it is not possible for a caller to
1228 * successfully distinguish between a vnode associate with a zero
1229 * length file, and a vnode with no associated ubc_info. The
1230 * caller therefore needs to not care, or needs to ensure that
1231 * they have previously successfully called ubc_info_init() or
1232 * ubc_info_init_withsize().
1c79356b
A
1233 */
1234off_t
1235ubc_getsize(struct vnode *vp)
1236{
91447636 1237 /* people depend on the side effect of this working this way
0a7de745 1238 * as they call this for directory
1c79356b 1239 */
0a7de745
A
1240 if (!UBCINFOEXISTS(vp)) {
1241 return (off_t)0;
1242 }
1243 return vp->v_ubcinfo->ui_size;
1c79356b
A
1244}
1245
2d21ac55 1246
1c79356b 1247/*
2d21ac55
A
1248 * ubc_umount
1249 *
fe8ab488 1250 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
2d21ac55
A
1251 * mount point
1252 *
1253 * Parameters: mp The mount point
1254 *
1255 * Returns: 0 Success
1256 *
1257 * Notes: There is no failure indication for this function.
1258 *
1259 * This function is used in the unmount path; since it may block
1260 * I/O indefinitely, it should not be used in the forced unmount
1261 * path, since a device unavailability could also block that
1262 * indefinitely.
1263 *
1264 * Because there is no device ejection interlock on USB, FireWire,
1265 * or similar devices, it's possible that an ejection that begins
1266 * subsequent to the vnode_iterate() completing, either on one of
1267 * those devices, or a network mount for which the server quits
1268 * responding, etc., may cause the caller to block indefinitely.
1c79356b 1269 */
0b4e3aa0 1270__private_extern__ int
1c79356b
A
1271ubc_umount(struct mount *mp)
1272{
91447636 1273 vnode_iterate(mp, 0, ubc_umcallback, 0);
0a7de745 1274 return 0;
1c79356b
A
1275}
1276
2d21ac55
A
1277
1278/*
1279 * ubc_umcallback
1280 *
1281 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1282 * and vnode_iterate() for details of implementation.
1283 */
91447636
A
1284static int
1285ubc_umcallback(vnode_t vp, __unused void * args)
1c79356b 1286{
91447636 1287 if (UBCINFOEXISTS(vp)) {
91447636 1288 (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1c79356b 1289 }
0a7de745 1290 return VNODE_RETURNED;
1c79356b
A
1291}
1292
91447636 1293
2d21ac55
A
1294/*
1295 * ubc_getcred
1296 *
1297 * Get the credentials currently active for the ubc_info associated with the
1298 * vnode.
1299 *
1300 * Parameters: vp The vnode whose ubc_info credentials
1301 * are to be retrieved
1302 *
1303 * Returns: !NOCRED The credentials
1304 * NOCRED If there is no ubc_info for the vnode,
1305 * or if there is one, but it has not had
1306 * any credentials associated with it via
1307 * a call to ubc_setcred()
1308 */
91447636 1309kauth_cred_t
1c79356b
A
1310ubc_getcred(struct vnode *vp)
1311{
0a7de745
A
1312 if (UBCINFOEXISTS(vp)) {
1313 return vp->v_ubcinfo->ui_ucred;
1314 }
1c79356b 1315
0a7de745 1316 return NOCRED;
1c79356b
A
1317}
1318
2d21ac55
A
1319
1320/*
1321 * ubc_setthreadcred
1322 *
1323 * If they are not already set, set the credentials of the ubc_info structure
1324 * associated with the vnode to those of the supplied thread; otherwise leave
1325 * them alone.
1326 *
1327 * Parameters: vp The vnode whose ubc_info creds are to
1328 * be set
1329 * p The process whose credentials are to
1330 * be used, if not running on an assumed
1331 * credential
1332 * thread The thread whose credentials are to
1333 * be used
1334 *
1335 * Returns: 1 This vnode has no associated ubc_info
1336 * 0 Success
1337 *
1338 * Notes: This function takes a proc parameter to account for bootstrap
1339 * issues where a task or thread may call this routine, either
1340 * before credentials have been initialized by bsd_init(), or if
1341 * there is no BSD info asscoiate with a mach thread yet. This
1342 * is known to happen in both the initial swap and memory mapping
1343 * calls.
1344 *
1345 * This function is generally used only in the following cases:
1346 *
1347 * o a memory mapped file via the mmap() system call
2d21ac55
A
1348 * o a swap store backing file
1349 * o subsequent to a successful write via vn_write()
1350 *
1351 * The information is then used by the NFS client in order to
1352 * cons up a wire message in either the page-in or page-out path.
1353 *
1354 * There are two potential problems with the use of this API:
1355 *
1356 * o Because the write path only set it on a successful
1357 * write, there is a race window between setting the
1358 * credential and its use to evict the pages to the
1359 * remote file server
1360 *
1361 * o Because a page-in may occur prior to a write, the
1362 * credential may not be set at this time, if the page-in
fe8ab488 1363 * is not the result of a mapping established via mmap().
2d21ac55
A
1364 *
1365 * In both these cases, this will be triggered from the paging
1366 * path, which will instead use the credential of the current
1367 * process, which in this case is either the dynamic_pager or
1368 * the kernel task, both of which utilize "root" credentials.
1369 *
1370 * This may potentially permit operations to occur which should
1371 * be denied, or it may cause to be denied operations which
1372 * should be permitted, depending on the configuration of the NFS
1373 * server.
1374 */
13fec989 1375int
2d21ac55 1376ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
13fec989
A
1377{
1378 struct ubc_info *uip;
1379 kauth_cred_t credp;
2d21ac55 1380 struct uthread *uthread = get_bsdthread_info(thread);
13fec989 1381
0a7de745
A
1382 if (!UBCINFOEXISTS(vp)) {
1383 return 1;
1384 }
13fec989
A
1385
1386 vnode_lock(vp);
1387
1388 uip = vp->v_ubcinfo;
1389 credp = uip->ui_ucred;
1390
0c530ab8 1391 if (!IS_VALID_CRED(credp)) {
13fec989
A
1392 /* use per-thread cred, if assumed identity, else proc cred */
1393 if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
1394 uip->ui_ucred = kauth_cred_proc_ref(p);
1395 } else {
1396 uip->ui_ucred = uthread->uu_ucred;
1397 kauth_cred_ref(uip->ui_ucred);
1398 }
0a7de745 1399 }
13fec989
A
1400 vnode_unlock(vp);
1401
0a7de745 1402 return 0;
13fec989
A
1403}
1404
2d21ac55 1405
1c79356b 1406/*
2d21ac55
A
1407 * ubc_setcred
1408 *
1409 * If they are not already set, set the credentials of the ubc_info structure
1410 * associated with the vnode to those of the process; otherwise leave them
1411 * alone.
1412 *
1413 * Parameters: vp The vnode whose ubc_info creds are to
1414 * be set
1415 * p The process whose credentials are to
1416 * be used
1417 *
1418 * Returns: 0 This vnode has no associated ubc_info
1419 * 1 Success
1420 *
1421 * Notes: The return values for this function are inverted from nearly
1422 * all other uses in the kernel.
1423 *
1424 * See also ubc_setthreadcred(), above.
1425 *
1426 * This function is considered deprecated, and generally should
1427 * not be used, as it is incompatible with per-thread credentials;
1428 * it exists for legacy KPI reasons.
1429 *
0a7de745 1430 * DEPRECATION: ubc_setcred() is being deprecated. Please use
2d21ac55 1431 * ubc_setthreadcred() instead.
1c79356b 1432 */
1c79356b 1433int
2d21ac55 1434ubc_setcred(struct vnode *vp, proc_t p)
1c79356b
A
1435{
1436 struct ubc_info *uip;
91447636 1437 kauth_cred_t credp;
1c79356b 1438
2d21ac55 1439 /* If there is no ubc_info, deny the operation */
0a7de745
A
1440 if (!UBCINFOEXISTS(vp)) {
1441 return 0;
1442 }
1c79356b 1443
2d21ac55
A
1444 /*
1445 * Check to see if there is already a credential reference in the
1446 * ubc_info; if there is not, take one on the supplied credential.
1447 */
91447636 1448 vnode_lock(vp);
91447636 1449 uip = vp->v_ubcinfo;
1c79356b 1450 credp = uip->ui_ucred;
0c530ab8 1451 if (!IS_VALID_CRED(credp)) {
91447636 1452 uip->ui_ucred = kauth_cred_proc_ref(p);
0a7de745 1453 }
91447636 1454 vnode_unlock(vp);
1c79356b 1455
0a7de745 1456 return 1;
1c79356b
A
1457}
1458
2d21ac55
A
1459/*
1460 * ubc_getpager
1461 *
1462 * Get the pager associated with the ubc_info associated with the vnode.
1463 *
1464 * Parameters: vp The vnode to obtain the pager from
1465 *
1466 * Returns: !VNODE_PAGER_NULL The memory_object_t for the pager
1467 * VNODE_PAGER_NULL There is no ubc_info for this vnode
1468 *
1469 * Notes: For each vnode that has a ubc_info associated with it, that
1470 * ubc_info SHALL have a pager associated with it, so in the
1471 * normal case, it's impossible to return VNODE_PAGER_NULL for
1472 * a vnode with an associated ubc_info.
1473 */
0b4e3aa0 1474__private_extern__ memory_object_t
1c79356b
A
1475ubc_getpager(struct vnode *vp)
1476{
0a7de745
A
1477 if (UBCINFOEXISTS(vp)) {
1478 return vp->v_ubcinfo->ui_pager;
1479 }
1c79356b 1480
0a7de745 1481 return 0;
1c79356b
A
1482}
1483
2d21ac55 1484
1c79356b 1485/*
2d21ac55
A
1486 * ubc_getobject
1487 *
1488 * Get the memory object control associated with the ubc_info associated with
1489 * the vnode
1490 *
1491 * Parameters: vp The vnode to obtain the memory object
1492 * from
1493 * flags DEPRECATED
1494 *
1495 * Returns: !MEMORY_OBJECT_CONTROL_NULL
1496 * MEMORY_OBJECT_CONTROL_NULL
1497 *
1498 * Notes: Historically, if the flags were not "do not reactivate", this
1499 * function would look up the memory object using the pager if
1500 * it did not exist (this could be the case if the vnode had
1501 * been previously reactivated). The flags would also permit a
1502 * hold to be requested, which would have created an object
1503 * reference, if one had not already existed. This usage is
1504 * deprecated, as it would permit a race between finding and
1505 * taking the reference vs. a single reference being dropped in
1506 * another thread.
1c79356b 1507 */
0b4e3aa0 1508memory_object_control_t
91447636 1509ubc_getobject(struct vnode *vp, __unused int flags)
1c79356b 1510{
0a7de745
A
1511 if (UBCINFOEXISTS(vp)) {
1512 return vp->v_ubcinfo->ui_control;
1513 }
1c79356b 1514
0a7de745 1515 return MEMORY_OBJECT_CONTROL_NULL;
1c79356b
A
1516}
1517
2d21ac55
A
1518/*
1519 * ubc_blktooff
1520 *
1521 * Convert a given block number to a memory backing object (file) offset for a
1522 * given vnode
1523 *
1524 * Parameters: vp The vnode in which the block is located
1525 * blkno The block number to convert
1526 *
1527 * Returns: !-1 The offset into the backing object
1528 * -1 There is no ubc_info associated with
1529 * the vnode
1530 * -1 An error occurred in the underlying VFS
1531 * while translating the block to an
1532 * offset; the most likely cause is that
1533 * the caller specified a block past the
1534 * end of the file, but this could also be
1535 * any other error from VNOP_BLKTOOFF().
1536 *
1537 * Note: Representing the error in band loses some information, but does
1538 * not occlude a valid offset, since an off_t of -1 is normally
1539 * used to represent EOF. If we had a more reliable constant in
1540 * our header files for it (i.e. explicitly cast to an off_t), we
1541 * would use it here instead.
1542 */
1c79356b 1543off_t
91447636 1544ubc_blktooff(vnode_t vp, daddr64_t blkno)
1c79356b 1545{
2d21ac55 1546 off_t file_offset = -1;
1c79356b
A
1547 int error;
1548
2d21ac55
A
1549 if (UBCINFOEXISTS(vp)) {
1550 error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
0a7de745 1551 if (error) {
2d21ac55 1552 file_offset = -1;
0a7de745 1553 }
2d21ac55 1554 }
1c79356b 1555
0a7de745 1556 return file_offset;
1c79356b 1557}
0b4e3aa0 1558
2d21ac55
A
1559
1560/*
1561 * ubc_offtoblk
1562 *
1563 * Convert a given offset in a memory backing object into a block number for a
1564 * given vnode
1565 *
1566 * Parameters: vp The vnode in which the offset is
1567 * located
1568 * offset The offset into the backing object
1569 *
1570 * Returns: !-1 The returned block number
1571 * -1 There is no ubc_info associated with
1572 * the vnode
1573 * -1 An error occurred in the underlying VFS
1574 * while translating the block to an
1575 * offset; the most likely cause is that
1576 * the caller specified a block past the
1577 * end of the file, but this could also be
1578 * any other error from VNOP_OFFTOBLK().
1579 *
1580 * Note: Representing the error in band loses some information, but does
1581 * not occlude a valid block number, since block numbers exceed
1582 * the valid range for offsets, due to their relative sizes. If
1583 * we had a more reliable constant than -1 in our header files
1584 * for it (i.e. explicitly cast to an daddr64_t), we would use it
1585 * here instead.
1586 */
91447636
A
1587daddr64_t
1588ubc_offtoblk(vnode_t vp, off_t offset)
1c79356b 1589{
2d21ac55 1590 daddr64_t blkno = -1;
0b4e3aa0 1591 int error = 0;
1c79356b 1592
2d21ac55
A
1593 if (UBCINFOEXISTS(vp)) {
1594 error = VNOP_OFFTOBLK(vp, offset, &blkno);
0a7de745 1595 if (error) {
2d21ac55 1596 blkno = -1;
0a7de745 1597 }
2d21ac55 1598 }
1c79356b 1599
0a7de745 1600 return blkno;
1c79356b
A
1601}
1602
2d21ac55
A
1603
1604/*
1605 * ubc_pages_resident
1606 *
1607 * Determine whether or not a given vnode has pages resident via the memory
1608 * object control associated with the ubc_info associated with the vnode
1609 *
1610 * Parameters: vp The vnode we want to know about
1611 *
1612 * Returns: 1 Yes
1613 * 0 No
1614 */
1c79356b 1615int
91447636 1616ubc_pages_resident(vnode_t vp)
1c79356b 1617{
0a7de745
A
1618 kern_return_t kret;
1619 boolean_t has_pages_resident;
1620
1621 if (!UBCINFOEXISTS(vp)) {
1622 return 0;
1623 }
1624
2d21ac55
A
1625 /*
1626 * The following call may fail if an invalid ui_control is specified,
1627 * or if there is no VM object associated with the control object. In
1628 * either case, reacting to it as if there were no pages resident will
1629 * result in correct behavior.
1630 */
91447636 1631 kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
0a7de745
A
1632
1633 if (kret != KERN_SUCCESS) {
1634 return 0;
1635 }
1636
1637 if (has_pages_resident == TRUE) {
1638 return 1;
1639 }
1640
1641 return 0;
91447636 1642}
1c79356b 1643
0b4e3aa0 1644/*
2d21ac55
A
1645 * ubc_msync
1646 *
1647 * Clean and/or invalidate a range in the memory object that backs this vnode
1648 *
1649 * Parameters: vp The vnode whose associated ubc_info's
1650 * associated memory object is to have a
1651 * range invalidated within it
1652 * beg_off The start of the range, as an offset
1653 * end_off The end of the range, as an offset
1654 * resid_off The address of an off_t supplied by the
1655 * caller; may be set to NULL to ignore
1656 * flags See ubc_msync_internal()
1657 *
1658 * Returns: 0 Success
1659 * !0 Failure; an errno is returned
1660 *
1661 * Implicit Returns:
1662 * *resid_off, modified If non-NULL, the contents are ALWAYS
1663 * modified; they are initialized to the
1664 * beg_off, and in case of an I/O error,
1665 * the difference between beg_off and the
1666 * current value will reflect what was
1667 * able to be written before the error
1668 * occurred. If no error is returned, the
1669 * value of the resid_off is undefined; do
1670 * NOT use it in place of end_off if you
1671 * intend to increment from the end of the
1672 * last call and call iteratively.
1673 *
1674 * Notes: see ubc_msync_internal() for more detailed information.
1675 *
0b4e3aa0 1676 */
91447636
A
1677errno_t
1678ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
0b4e3aa0 1679{
0a7de745 1680 int retval;
91447636 1681 int io_errno = 0;
0b4e3aa0 1682
0a7de745
A
1683 if (resid_off) {
1684 *resid_off = beg_off;
1685 }
1686
1687 retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
0b4e3aa0 1688
0a7de745
A
1689 if (retval == 0 && io_errno == 0) {
1690 return EINVAL;
1691 }
1692 return io_errno;
91447636 1693}
0b4e3aa0 1694
1c79356b 1695
1c79356b 1696/*
fe8ab488
A
1697 * ubc_msync_internal
1698 *
2d21ac55
A
1699 * Clean and/or invalidate a range in the memory object that backs this vnode
1700 *
1701 * Parameters: vp The vnode whose associated ubc_info's
1702 * associated memory object is to have a
1703 * range invalidated within it
1704 * beg_off The start of the range, as an offset
1705 * end_off The end of the range, as an offset
1706 * resid_off The address of an off_t supplied by the
1707 * caller; may be set to NULL to ignore
1708 * flags MUST contain at least one of the flags
1709 * UBC_INVALIDATE, UBC_PUSHDIRTY, or
1710 * UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1711 * UBC_SYNC may also be specified to cause
1712 * this function to block until the
1713 * operation is complete. The behavior
1714 * of UBC_SYNC is otherwise undefined.
1715 * io_errno The address of an int to contain the
1716 * errno from a failed I/O operation, if
1717 * one occurs; may be set to NULL to
1718 * ignore
1719 *
1720 * Returns: 1 Success
1721 * 0 Failure
1722 *
1723 * Implicit Returns:
1724 * *resid_off, modified The contents of this offset MAY be
1725 * modified; in case of an I/O error, the
1726 * difference between beg_off and the
1727 * current value will reflect what was
1728 * able to be written before the error
1729 * occurred.
1730 * *io_errno, modified The contents of this offset are set to
1731 * an errno, if an error occurs; if the
1732 * caller supplies an io_errno parameter,
1733 * they should be careful to initialize it
1734 * to 0 before calling this function to
1735 * enable them to distinguish an error
1736 * with a valid *resid_off from an invalid
1737 * one, and to avoid potentially falsely
1738 * reporting an error, depending on use.
1739 *
1740 * Notes: If there is no ubc_info associated with the vnode supplied,
1741 * this function immediately returns success.
1742 *
1743 * If the value of end_off is less than or equal to beg_off, this
1744 * function immediately returns success; that is, end_off is NOT
1745 * inclusive.
1746 *
1747 * IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1748 * UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1749 * attempt to block on in-progress I/O by calling this function
1750 * with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1751 * in order to block pending on the I/O already in progress.
1752 *
1753 * The start offset is truncated to the page boundary and the
1754 * size is adjusted to include the last page in the range; that
1755 * is, end_off on exactly a page boundary will not change if it
1756 * is rounded, and the range of bytes written will be from the
1757 * truncate beg_off to the rounded (end_off - 1).
1c79356b 1758 */
91447636
A
1759static int
1760ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
1c79356b 1761{
0a7de745
A
1762 memory_object_size_t tsize;
1763 kern_return_t kret;
91447636
A
1764 int request_flags = 0;
1765 int flush_flags = MEMORY_OBJECT_RETURN_NONE;
0a7de745
A
1766
1767 if (!UBCINFOEXISTS(vp)) {
1768 return 0;
1769 }
1770 if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
1771 return 0;
1772 }
1773 if (end_off <= beg_off) {
1774 return 1;
1775 }
1776
1777 if (flags & UBC_INVALIDATE) {
1778 /*
91447636
A
1779 * discard the resident pages
1780 */
1781 request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
0a7de745 1782 }
1c79356b 1783
0a7de745
A
1784 if (flags & UBC_SYNC) {
1785 /*
91447636 1786 * wait for all the I/O to complete before returning
55e303ae 1787 */
0a7de745
A
1788 request_flags |= MEMORY_OBJECT_IO_SYNC;
1789 }
55e303ae 1790
0a7de745
A
1791 if (flags & UBC_PUSHDIRTY) {
1792 /*
91447636
A
1793 * we only return the dirty pages in the range
1794 */
0a7de745
A
1795 flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
1796 }
0b4e3aa0 1797
0a7de745
A
1798 if (flags & UBC_PUSHALL) {
1799 /*
2d21ac55
A
1800 * then return all the interesting pages in the range (both
1801 * dirty and precious) to the pager
91447636 1802 */
0a7de745
A
1803 flush_flags = MEMORY_OBJECT_RETURN_ALL;
1804 }
0b4e3aa0 1805
91447636
A
1806 beg_off = trunc_page_64(beg_off);
1807 end_off = round_page_64(end_off);
1808 tsize = (memory_object_size_t)end_off - beg_off;
b4c24cb9 1809
91447636
A
1810 /* flush and/or invalidate pages in the range requested */
1811 kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
0a7de745
A
1812 beg_off, tsize,
1813 (memory_object_offset_t *)resid_off,
1814 io_errno, flush_flags, request_flags,
1815 VM_PROT_NO_CHANGE);
1816
1817 return (kret == KERN_SUCCESS) ? 1 : 0;
1c79356b
A
1818}
1819
1c79356b
A
1820
1821/*
fe8ab488 1822 * ubc_map
2d21ac55
A
1823 *
1824 * Explicitly map a vnode that has an associate ubc_info, and add a reference
1825 * to it for the ubc system, if there isn't one already, so it will not be
1826 * recycled while it's in use, and set flags on the ubc_info to indicate that
1827 * we have done this
1828 *
1829 * Parameters: vp The vnode to map
1830 * flags The mapping flags for the vnode; this
1831 * will be a combination of one or more of
1832 * PROT_READ, PROT_WRITE, and PROT_EXEC
1833 *
1834 * Returns: 0 Success
1835 * EPERM Permission was denied
1836 *
1837 * Notes: An I/O reference on the vnode must already be held on entry
1838 *
1839 * If there is no ubc_info associated with the vnode, this function
1840 * will return success.
1841 *
1842 * If a permission error occurs, this function will return
1843 * failure; all other failures will cause this function to return
1844 * success.
1845 *
1846 * IMPORTANT: This is an internal use function, and its symbols
1847 * are not exported, hence its error checking is not very robust.
1848 * It is primarily used by:
1849 *
1850 * o mmap(), when mapping a file
2d21ac55
A
1851 * o When mapping a shared file (a shared library in the
1852 * shared segment region)
1853 * o When loading a program image during the exec process
1854 *
1855 * ...all of these uses ignore the return code, and any fault that
1856 * results later because of a failure is handled in the fix-up path
1857 * of the fault handler. The interface exists primarily as a
1858 * performance hint.
1859 *
1860 * Given that third party implementation of the type of interfaces
1861 * that would use this function, such as alternative executable
1862 * formats, etc., are unsupported, this function is not exported
1863 * for general use.
1864 *
1865 * The extra reference is held until the VM system unmaps the
1866 * vnode from its own context to maintain a vnode reference in
1867 * cases like open()/mmap()/close(), which leave the backing
1868 * object referenced by a mapped memory region in a process
1869 * address space.
1c79356b 1870 */
91447636
A
1871__private_extern__ int
1872ubc_map(vnode_t vp, int flags)
1c79356b
A
1873{
1874 struct ubc_info *uip;
91447636
A
1875 int error = 0;
1876 int need_ref = 0;
2d21ac55 1877 int need_wakeup = 0;
1c79356b 1878
91447636 1879 if (UBCINFOEXISTS(vp)) {
2d21ac55
A
1880 vnode_lock(vp);
1881 uip = vp->v_ubcinfo;
1882
1883 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
1884 SET(uip->ui_flags, UI_MAPWAITING);
1885 (void) msleep(&uip->ui_flags, &vp->v_lock,
0a7de745 1886 PRIBIO, "ubc_map", NULL);
2d21ac55
A
1887 }
1888 SET(uip->ui_flags, UI_MAPBUSY);
1889 vnode_unlock(vp);
1890
1891 error = VNOP_MMAP(vp, flags, vfs_context_current());
1c79356b 1892
39037602
A
1893 /*
1894 * rdar://problem/22587101 required that we stop propagating
0a7de745 1895 * EPERM up the stack. Otherwise, we would have to funnel up
39037602 1896 * the error at all the call sites for memory_object_map().
0a7de745 1897 * The risk is in having to undo the map/object/entry state at
39037602
A
1898 * all these call sites. It would also affect more than just mmap()
1899 * e.g. vm_remap().
1900 *
1901 * if (error != EPERM)
0a7de745 1902 * error = 0;
39037602
A
1903 */
1904
1905 error = 0;
1c79356b 1906
2d21ac55 1907 vnode_lock_spin(vp);
1c79356b 1908
2d21ac55 1909 if (error == 0) {
0a7de745
A
1910 if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
1911 need_ref = 1;
1912 }
91447636 1913 SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
22ba694c
A
1914 if (flags & PROT_WRITE) {
1915 SET(uip->ui_flags, UI_MAPPEDWRITE);
1916 }
2d21ac55
A
1917 }
1918 CLR(uip->ui_flags, UI_MAPBUSY);
55e303ae 1919
2d21ac55
A
1920 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
1921 CLR(uip->ui_flags, UI_MAPWAITING);
1922 need_wakeup = 1;
55e303ae 1923 }
2d21ac55 1924 vnode_unlock(vp);
b4c24cb9 1925
0a7de745 1926 if (need_wakeup) {
2d21ac55 1927 wakeup(&uip->ui_flags);
0a7de745 1928 }
2d21ac55 1929
39037602
A
1930 if (need_ref) {
1931 /*
1932 * Make sure we get a ref as we can't unwind from here
1933 */
0a7de745 1934 if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
39037602 1935 panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__);
0a7de745 1936 }
cb323159
A
1937 /*
1938 * Vnodes that are on "unreliable" media (like disk
1939 * images, network filesystems, 3rd-party filesystems,
1940 * and possibly external devices) could see their
1941 * contents be changed via the backing store without
1942 * triggering copy-on-write, so we can't fully rely
1943 * on copy-on-write and might have to resort to
1944 * copy-on-read to protect "privileged" processes and
1945 * prevent privilege escalation.
1946 *
1947 * The root filesystem is considered "reliable" because
1948 * there's not much point in trying to protect
1949 * ourselves from such a vulnerability and the extra
1950 * cost of copy-on-read (CPU time and memory pressure)
1951 * could result in some serious regressions.
1952 */
1953 if (vp->v_mount != NULL &&
1954 ((vp->v_mount->mnt_flag & MNT_ROOTFS) ||
1955 vnode_on_reliable_media(vp))) {
1956 /*
1957 * This vnode is deemed "reliable" so mark
1958 * its VM object as "trusted".
1959 */
1960 memory_object_mark_trusted(uip->ui_control);
1961 } else {
1962// printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
1963 }
39037602 1964 }
2d21ac55 1965 }
0a7de745 1966 return error;
0b4e3aa0
A
1967}
1968
2d21ac55 1969
0b4e3aa0 1970/*
2d21ac55
A
1971 * ubc_destroy_named
1972 *
1973 * Destroy the named memory object associated with the ubc_info control object
1974 * associated with the designated vnode, if there is a ubc_info associated
1975 * with the vnode, and a control object is associated with it
1976 *
1977 * Parameters: vp The designated vnode
1978 *
1979 * Returns: (void)
1980 *
1981 * Notes: This function is called on vnode termination for all vnodes,
1982 * and must therefore not assume that there is a ubc_info that is
1983 * associated with the vnode, nor that there is a control object
1984 * associated with the ubc_info.
1985 *
1986 * If all the conditions necessary are present, this function
1987 * calls memory_object_destory(), which will in turn end up
1988 * calling ubc_unmap() to release any vnode references that were
1989 * established via ubc_map().
1990 *
1991 * IMPORTANT: This is an internal use function that is used
1992 * exclusively by the internal use function vclean().
0b4e3aa0 1993 */
2d21ac55
A
1994__private_extern__ void
1995ubc_destroy_named(vnode_t vp)
0b4e3aa0
A
1996{
1997 memory_object_control_t control;
0b4e3aa0
A
1998 struct ubc_info *uip;
1999 kern_return_t kret;
2000
2d21ac55 2001 if (UBCINFOEXISTS(vp)) {
0a7de745 2002 uip = vp->v_ubcinfo;
2d21ac55
A
2003
2004 /* Terminate the memory object */
2005 control = ubc_getobject(vp, UBC_HOLDOBJECT);
2006 if (control != MEMORY_OBJECT_CONTROL_NULL) {
0a7de745
A
2007 kret = memory_object_destroy(control, 0);
2008 if (kret != KERN_SUCCESS) {
2009 panic("ubc_destroy_named: memory_object_destroy failed");
2010 }
0b4e3aa0
A
2011 }
2012 }
1c79356b
A
2013}
2014
0b4e3aa0 2015
1c79356b 2016/*
2d21ac55
A
2017 * ubc_isinuse
2018 *
2019 * Determine whether or not a vnode is currently in use by ubc at a level in
2020 * excess of the requested busycount
2021 *
2022 * Parameters: vp The vnode to check
2023 * busycount The threshold busy count, used to bias
2024 * the count usually already held by the
2025 * caller to avoid races
2026 *
2027 * Returns: 1 The vnode is in use over the threshold
2028 * 0 The vnode is not in use over the
2029 * threshold
2030 *
2031 * Notes: Because the vnode is only held locked while actually asking
2032 * the use count, this function only represents a snapshot of the
2033 * current state of the vnode. If more accurate information is
2034 * required, an additional busycount should be held by the caller
2035 * and a non-zero busycount used.
2036 *
2037 * If there is no ubc_info associated with the vnode, this
2038 * function will report that the vnode is not in use by ubc.
1c79356b
A
2039 */
2040int
91447636 2041ubc_isinuse(struct vnode *vp, int busycount)
1c79356b 2042{
0a7de745
A
2043 if (!UBCINFOEXISTS(vp)) {
2044 return 0;
2045 }
2046 return ubc_isinuse_locked(vp, busycount, 0);
1c79356b
A
2047}
2048
91447636 2049
2d21ac55
A
2050/*
2051 * ubc_isinuse_locked
2052 *
2053 * Determine whether or not a vnode is currently in use by ubc at a level in
2054 * excess of the requested busycount
2055 *
2056 * Parameters: vp The vnode to check
2057 * busycount The threshold busy count, used to bias
2058 * the count usually already held by the
2059 * caller to avoid races
2060 * locked True if the vnode is already locked by
2061 * the caller
2062 *
2063 * Returns: 1 The vnode is in use over the threshold
2064 * 0 The vnode is not in use over the
2065 * threshold
2066 *
2067 * Notes: If the vnode is not locked on entry, it is locked while
2068 * actually asking the use count. If this is the case, this
2069 * function only represents a snapshot of the current state of
2070 * the vnode. If more accurate information is required, the
2071 * vnode lock should be held by the caller, otherwise an
2072 * additional busycount should be held by the caller and a
2073 * non-zero busycount used.
2074 *
2075 * If there is no ubc_info associated with the vnode, this
2076 * function will report that the vnode is not in use by ubc.
2077 */
1c79356b 2078int
91447636 2079ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
1c79356b 2080{
91447636 2081 int retval = 0;
1c79356b 2082
9bccf70c 2083
0a7de745 2084 if (!locked) {
b0d623f7 2085 vnode_lock_spin(vp);
0a7de745 2086 }
1c79356b 2087
0a7de745 2088 if ((vp->v_usecount - vp->v_kusecount) > busycount) {
91447636 2089 retval = 1;
0a7de745 2090 }
91447636 2091
0a7de745 2092 if (!locked) {
91447636 2093 vnode_unlock(vp);
0a7de745
A
2094 }
2095 return retval;
1c79356b
A
2096}
2097
91447636 2098
1c79356b 2099/*
2d21ac55
A
2100 * ubc_unmap
2101 *
2102 * Reverse the effects of a ubc_map() call for a given vnode
2103 *
2104 * Parameters: vp vnode to unmap from ubc
2105 *
2106 * Returns: (void)
2107 *
2108 * Notes: This is an internal use function used by vnode_pager_unmap().
2109 * It will attempt to obtain a reference on the supplied vnode,
2110 * and if it can do so, and there is an associated ubc_info, and
2111 * the flags indicate that it was mapped via ubc_map(), then the
2112 * flag is cleared, the mapping removed, and the reference taken
2113 * by ubc_map() is released.
2114 *
2115 * IMPORTANT: This MUST only be called by the VM
2116 * to prevent race conditions.
1c79356b 2117 */
0b4e3aa0 2118__private_extern__ void
1c79356b
A
2119ubc_unmap(struct vnode *vp)
2120{
2121 struct ubc_info *uip;
0a7de745
A
2122 int need_rele = 0;
2123 int need_wakeup = 0;
b0d623f7 2124
0a7de745
A
2125 if (vnode_getwithref(vp)) {
2126 return;
2127 }
1c79356b 2128
91447636 2129 if (UBCINFOEXISTS(vp)) {
fe8ab488
A
2130 bool want_fsevent = false;
2131
91447636 2132 vnode_lock(vp);
91447636 2133 uip = vp->v_ubcinfo;
2d21ac55
A
2134
2135 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
2136 SET(uip->ui_flags, UI_MAPWAITING);
2137 (void) msleep(&uip->ui_flags, &vp->v_lock,
0a7de745 2138 PRIBIO, "ubc_unmap", NULL);
2d21ac55
A
2139 }
2140 SET(uip->ui_flags, UI_MAPBUSY);
2141
91447636 2142 if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
0a7de745 2143 if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
fe8ab488 2144 want_fsevent = true;
0a7de745 2145 }
fe8ab488 2146
91447636 2147 need_rele = 1;
fe8ab488
A
2148
2149 /*
2150 * We want to clear the mapped flags after we've called
2151 * VNOP_MNOMAP to avoid certain races and allow
2152 * VNOP_MNOMAP to call ubc_is_mapped_writable.
2153 */
91447636
A
2154 }
2155 vnode_unlock(vp);
fe8ab488 2156
91447636 2157 if (need_rele) {
0a7de745 2158 vfs_context_t ctx = vfs_context_current();
fe8ab488 2159
0a7de745 2160 (void)VNOP_MNOMAP(vp, ctx);
fe8ab488
A
2161
2162#if CONFIG_FSE
0a7de745
A
2163 /*
2164 * Why do we want an fsevent here? Normally the
2165 * content modified fsevent is posted when a file is
2166 * closed and only if it's written to via conventional
2167 * means. It's perfectly legal to close a file and
2168 * keep your mappings and we don't currently track
2169 * whether it was written to via a mapping.
2170 * Therefore, we need to post an fsevent here if the
2171 * file was mapped writable. This may result in false
2172 * events, i.e. we post a notification when nothing
2173 * has really changed.
2174 */
2175 if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
2176 add_fsevent(FSE_CONTENT_MODIFIED, ctx,
2177 FSE_ARG_VNODE, vp,
2178 FSE_ARG_DONE);
2179 }
fe8ab488
A
2180#endif
2181
0a7de745 2182 vnode_rele(vp);
91447636 2183 }
2d21ac55
A
2184
2185 vnode_lock_spin(vp);
2186
0a7de745 2187 if (need_rele) {
fe8ab488 2188 CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
0a7de745 2189 }
fe8ab488 2190
2d21ac55 2191 CLR(uip->ui_flags, UI_MAPBUSY);
fe8ab488 2192
2d21ac55
A
2193 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2194 CLR(uip->ui_flags, UI_MAPWAITING);
2195 need_wakeup = 1;
2196 }
2197 vnode_unlock(vp);
2198
0a7de745
A
2199 if (need_wakeup) {
2200 wakeup(&uip->ui_flags);
2201 }
91447636
A
2202 }
2203 /*
2204 * the drop of the vnode ref will cleanup
2205 */
2206 vnode_put(vp);
0b4e3aa0
A
2207}
2208
2d21ac55
A
2209
2210/*
2211 * ubc_page_op
2212 *
2213 * Manipulate individual page state for a vnode with an associated ubc_info
2214 * with an associated memory object control.
2215 *
2216 * Parameters: vp The vnode backing the page
2217 * f_offset A file offset interior to the page
2218 * ops The operations to perform, as a bitmap
2219 * (see below for more information)
2220 * phys_entryp The address of a ppnum_t; may be NULL
2221 * to ignore
2222 * flagsp A pointer to an int to contain flags;
2223 * may be NULL to ignore
2224 *
2225 * Returns: KERN_SUCCESS Success
2226 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2227 * object associated
2228 * KERN_INVALID_OBJECT If UPL_POP_PHYSICAL and the object is
2229 * not physically contiguous
2230 * KERN_INVALID_OBJECT If !UPL_POP_PHYSICAL and the object is
2231 * physically contiguous
2232 * KERN_FAILURE If the page cannot be looked up
2233 *
2234 * Implicit Returns:
2235 * *phys_entryp (modified) If phys_entryp is non-NULL and
2236 * UPL_POP_PHYSICAL
2237 * *flagsp (modified) If flagsp is non-NULL and there was
2238 * !UPL_POP_PHYSICAL and a KERN_SUCCESS
2239 *
2240 * Notes: For object boundaries, it is considerably more efficient to
2241 * ensure that f_offset is in fact on a page boundary, as this
2242 * will avoid internal use of the hash table to identify the
2243 * page, and would therefore skip a number of early optimizations.
2244 * Since this is a page operation anyway, the caller should try
2245 * to pass only a page aligned offset because of this.
2246 *
2247 * *flagsp may be modified even if this function fails. If it is
2248 * modified, it will contain the condition of the page before the
2249 * requested operation was attempted; these will only include the
2250 * bitmap flags, and not the PL_POP_PHYSICAL, UPL_POP_DUMP,
2251 * UPL_POP_SET, or UPL_POP_CLR bits.
2252 *
2253 * The flags field may contain a specific operation, such as
2254 * UPL_POP_PHYSICAL or UPL_POP_DUMP:
2255 *
2256 * o UPL_POP_PHYSICAL Fail if not contiguous; if
2257 * *phys_entryp and successful, set
2258 * *phys_entryp
2259 * o UPL_POP_DUMP Dump the specified page
2260 *
2261 * Otherwise, it is treated as a bitmap of one or more page
2262 * operations to perform on the final memory object; allowable
2263 * bit values are:
2264 *
2265 * o UPL_POP_DIRTY The page is dirty
2266 * o UPL_POP_PAGEOUT The page is paged out
2267 * o UPL_POP_PRECIOUS The page is precious
2268 * o UPL_POP_ABSENT The page is absent
2269 * o UPL_POP_BUSY The page is busy
2270 *
2271 * If the page status is only being queried and not modified, then
2272 * not other bits should be specified. However, if it is being
2273 * modified, exactly ONE of the following bits should be set:
2274 *
2275 * o UPL_POP_SET Set the current bitmap bits
2276 * o UPL_POP_CLR Clear the current bitmap bits
2277 *
2278 * Thus to effect a combination of setting an clearing, it may be
2279 * necessary to call this function twice. If this is done, the
2280 * set should be used before the clear, since clearing may trigger
2281 * a wakeup on the destination page, and if the page is backed by
2282 * an encrypted swap file, setting will trigger the decryption
2283 * needed before the wakeup occurs.
2284 */
0b4e3aa0
A
2285kern_return_t
2286ubc_page_op(
0a7de745
A
2287 struct vnode *vp,
2288 off_t f_offset,
2289 int ops,
2290 ppnum_t *phys_entryp,
2291 int *flagsp)
0b4e3aa0 2292{
0a7de745 2293 memory_object_control_t control;
0b4e3aa0
A
2294
2295 control = ubc_getobject(vp, UBC_FLAGS_NONE);
0a7de745 2296 if (control == MEMORY_OBJECT_CONTROL_NULL) {
0b4e3aa0 2297 return KERN_INVALID_ARGUMENT;
0a7de745 2298 }
0b4e3aa0 2299
0a7de745
A
2300 return memory_object_page_op(control,
2301 (memory_object_offset_t)f_offset,
2302 ops,
2303 phys_entryp,
2304 flagsp);
0b4e3aa0 2305}
2d21ac55
A
2306
2307
2308/*
2309 * ubc_range_op
2310 *
2311 * Manipulate page state for a range of memory for a vnode with an associated
2312 * ubc_info with an associated memory object control, when page level state is
2313 * not required to be returned from the call (i.e. there are no phys_entryp or
2314 * flagsp parameters to this call, and it takes a range which may contain
2315 * multiple pages, rather than an offset interior to a single page).
2316 *
2317 * Parameters: vp The vnode backing the page
2318 * f_offset_beg A file offset interior to the start page
2319 * f_offset_end A file offset interior to the end page
2320 * ops The operations to perform, as a bitmap
2321 * (see below for more information)
2322 * range The address of an int; may be NULL to
2323 * ignore
2324 *
2325 * Returns: KERN_SUCCESS Success
2326 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2327 * object associated
2328 * KERN_INVALID_OBJECT If the object is physically contiguous
2329 *
2330 * Implicit Returns:
2331 * *range (modified) If range is non-NULL, its contents will
2332 * be modified to contain the number of
2333 * bytes successfully operated upon.
2334 *
2335 * Notes: IMPORTANT: This function cannot be used on a range that
2336 * consists of physically contiguous pages.
2337 *
2338 * For object boundaries, it is considerably more efficient to
2339 * ensure that f_offset_beg and f_offset_end are in fact on page
2340 * boundaries, as this will avoid internal use of the hash table
2341 * to identify the page, and would therefore skip a number of
2342 * early optimizations. Since this is an operation on a set of
2343 * pages anyway, the caller should try to pass only a page aligned
2344 * offsets because of this.
2345 *
2346 * *range will be modified only if this function succeeds.
2347 *
2348 * The flags field MUST contain a specific operation; allowable
2349 * values are:
2350 *
2351 * o UPL_ROP_ABSENT Returns the extent of the range
2352 * presented which is absent, starting
2353 * with the start address presented
2354 *
2355 * o UPL_ROP_PRESENT Returns the extent of the range
2356 * presented which is present (resident),
2357 * starting with the start address
2358 * presented
2359 * o UPL_ROP_DUMP Dump the pages which are found in the
2360 * target object for the target range.
2361 *
2362 * IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2363 * multiple regions in the range, only the first matching region
2364 * is returned.
2365 */
55e303ae
A
2366kern_return_t
2367ubc_range_op(
0a7de745
A
2368 struct vnode *vp,
2369 off_t f_offset_beg,
2370 off_t f_offset_end,
55e303ae
A
2371 int ops,
2372 int *range)
2373{
0a7de745 2374 memory_object_control_t control;
55e303ae
A
2375
2376 control = ubc_getobject(vp, UBC_FLAGS_NONE);
0a7de745 2377 if (control == MEMORY_OBJECT_CONTROL_NULL) {
55e303ae 2378 return KERN_INVALID_ARGUMENT;
0a7de745 2379 }
55e303ae 2380
0a7de745
A
2381 return memory_object_range_op(control,
2382 (memory_object_offset_t)f_offset_beg,
2383 (memory_object_offset_t)f_offset_end,
2384 ops,
2385 range);
55e303ae 2386}
2d21ac55
A
2387
2388
2389/*
2390 * ubc_create_upl
2391 *
2392 * Given a vnode, cause the population of a portion of the vm_object; based on
2393 * the nature of the request, the pages returned may contain valid data, or
2394 * they may be uninitialized.
2395 *
2396 * Parameters: vp The vnode from which to create the upl
2397 * f_offset The start offset into the backing store
2398 * represented by the vnode
2399 * bufsize The size of the upl to create
2400 * uplp Pointer to the upl_t to receive the
2401 * created upl; MUST NOT be NULL
2402 * plp Pointer to receive the internal page
2403 * list for the created upl; MAY be NULL
2404 * to ignore
2405 *
2406 * Returns: KERN_SUCCESS The requested upl has been created
2407 * KERN_INVALID_ARGUMENT The bufsize argument is not an even
2408 * multiple of the page size
2409 * KERN_INVALID_ARGUMENT There is no ubc_info associated with
2410 * the vnode, or there is no memory object
0a7de745 2411 * control associated with the ubc_info
2d21ac55
A
2412 * memory_object_upl_request:KERN_INVALID_VALUE
2413 * The supplied upl_flags argument is
2414 * invalid
2415 * Implicit Returns:
0a7de745 2416 * *uplp (modified)
2d21ac55
A
2417 * *plp (modified) If non-NULL, the value of *plp will be
2418 * modified to point to the internal page
2419 * list; this modification may occur even
2420 * if this function is unsuccessful, in
2421 * which case the contents may be invalid
2422 *
2423 * Note: If successful, the returned *uplp MUST subsequently be freed
2424 * via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2425 * ubc_upl_abort(), or ubc_upl_abort_range().
2426 */
0b4e3aa0 2427kern_return_t
5ba3f43e 2428ubc_create_upl_external(
0a7de745
A
2429 struct vnode *vp,
2430 off_t f_offset,
2431 int bufsize,
2432 upl_t *uplp,
2433 upl_page_info_t **plp,
2434 int uplflags)
5ba3f43e 2435{
0a7de745 2436 return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
5ba3f43e
A
2437}
2438
2439kern_return_t
2440ubc_create_upl_kernel(
0a7de745
A
2441 struct vnode *vp,
2442 off_t f_offset,
2443 int bufsize,
2444 upl_t *uplp,
2445 upl_page_info_t **plp,
2446 int uplflags,
5ba3f43e 2447 vm_tag_t tag)
0b4e3aa0 2448{
0a7de745
A
2449 memory_object_control_t control;
2450 kern_return_t kr;
b0d623f7 2451
0a7de745 2452 if (plp != NULL) {
b0d623f7 2453 *plp = NULL;
0a7de745 2454 }
b0d623f7 2455 *uplp = NULL;
0a7de745
A
2456
2457 if (bufsize & 0xfff) {
0b4e3aa0 2458 return KERN_INVALID_ARGUMENT;
0a7de745 2459 }
0b4e3aa0 2460
0a7de745 2461 if (bufsize > MAX_UPL_SIZE_BYTES) {
6d2010ae 2462 return KERN_INVALID_ARGUMENT;
0a7de745 2463 }
6d2010ae 2464
b0d623f7 2465 if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
b0d623f7
A
2466 if (uplflags & UPL_UBC_MSYNC) {
2467 uplflags &= UPL_RET_ONLY_DIRTY;
2468
2469 uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
0a7de745 2470 UPL_SET_INTERNAL | UPL_SET_LITE;
b0d623f7
A
2471 } else if (uplflags & UPL_UBC_PAGEOUT) {
2472 uplflags &= UPL_RET_ONLY_DIRTY;
2473
0a7de745 2474 if (uplflags & UPL_RET_ONLY_DIRTY) {
b0d623f7 2475 uplflags |= UPL_NOBLOCK;
0a7de745 2476 }
b0d623f7
A
2477
2478 uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
0a7de745 2479 UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
b0d623f7 2480 } else {
316670eb 2481 uplflags |= UPL_RET_ONLY_ABSENT |
0a7de745
A
2482 UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
2483 UPL_SET_INTERNAL | UPL_SET_LITE;
316670eb
A
2484
2485 /*
2486 * if the requested size == PAGE_SIZE, we don't want to set
2487 * the UPL_NOBLOCK since we may be trying to recover from a
2488 * previous partial pagein I/O that occurred because we were low
2489 * on memory and bailed early in order to honor the UPL_NOBLOCK...
2490 * since we're only asking for a single page, we can block w/o fear
2491 * of tying up pages while waiting for more to become available
2492 */
0a7de745 2493 if (bufsize > PAGE_SIZE) {
316670eb 2494 uplflags |= UPL_NOBLOCK;
0a7de745 2495 }
b0d623f7
A
2496 }
2497 } else {
55e303ae 2498 uplflags &= ~UPL_FOR_PAGEOUT;
55e303ae 2499
b0d623f7
A
2500 if (uplflags & UPL_WILL_BE_DUMPED) {
2501 uplflags &= ~UPL_WILL_BE_DUMPED;
0a7de745
A
2502 uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
2503 } else {
2504 uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
2505 }
b0d623f7
A
2506 }
2507 control = ubc_getobject(vp, UBC_FLAGS_NONE);
0a7de745 2508 if (control == MEMORY_OBJECT_CONTROL_NULL) {
0b4e3aa0 2509 return KERN_INVALID_ARGUMENT;
0a7de745 2510 }
0b4e3aa0 2511
5ba3f43e 2512 kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
0a7de745 2513 if (kr == KERN_SUCCESS && plp != NULL) {
b0d623f7 2514 *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
0a7de745 2515 }
0b4e3aa0
A
2516 return kr;
2517}
0a7de745
A
2518
2519
2d21ac55
A
2520/*
2521 * ubc_upl_maxbufsize
2522 *
2523 * Return the maximum bufsize ubc_create_upl( ) will take.
2524 *
2525 * Parameters: none
2526 *
2527 * Returns: maximum size buffer (in bytes) ubc_create_upl( ) will take.
2528 */
0a7de745 2529upl_size_t
2d21ac55
A
2530ubc_upl_maxbufsize(
2531 void)
2532{
0a7de745 2533 return MAX_UPL_SIZE_BYTES;
2d21ac55 2534}
0b4e3aa0 2535
2d21ac55
A
2536/*
2537 * ubc_upl_map
2538 *
2539 * Map the page list assocated with the supplied upl into the kernel virtual
2540 * address space at the virtual address indicated by the dst_addr argument;
2541 * the entire upl is mapped
2542 *
2543 * Parameters: upl The upl to map
2544 * dst_addr The address at which to map the upl
2545 *
2546 * Returns: KERN_SUCCESS The upl has been mapped
2547 * KERN_INVALID_ARGUMENT The upl is UPL_NULL
2548 * KERN_FAILURE The upl is already mapped
2549 * vm_map_enter:KERN_INVALID_ARGUMENT
2550 * A failure code from vm_map_enter() due
2551 * to an invalid argument
2552 */
0b4e3aa0
A
2553kern_return_t
2554ubc_upl_map(
0a7de745
A
2555 upl_t upl,
2556 vm_offset_t *dst_addr)
0b4e3aa0 2557{
0a7de745 2558 return vm_upl_map(kernel_map, upl, dst_addr);
0b4e3aa0
A
2559}
2560
2561
2d21ac55
A
2562/*
2563 * ubc_upl_unmap
2564 *
2565 * Unmap the page list assocated with the supplied upl from the kernel virtual
2566 * address space; the entire upl is unmapped.
2567 *
2568 * Parameters: upl The upl to unmap
2569 *
2570 * Returns: KERN_SUCCESS The upl has been unmapped
2571 * KERN_FAILURE The upl is not currently mapped
2572 * KERN_INVALID_ARGUMENT If the upl is UPL_NULL
2573 */
0b4e3aa0
A
2574kern_return_t
2575ubc_upl_unmap(
0a7de745 2576 upl_t upl)
0b4e3aa0 2577{
0a7de745 2578 return vm_upl_unmap(kernel_map, upl);
0b4e3aa0
A
2579}
2580
2d21ac55
A
2581
2582/*
2583 * ubc_upl_commit
2584 *
2585 * Commit the contents of the upl to the backing store
2586 *
2587 * Parameters: upl The upl to commit
2588 *
2589 * Returns: KERN_SUCCESS The upl has been committed
2590 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2591 * KERN_FAILURE The supplied upl does not represent
2592 * device memory, and the offset plus the
2593 * size would exceed the actual size of
2594 * the upl
2595 *
2596 * Notes: In practice, the only return value for this function should be
2597 * KERN_SUCCESS, unless there has been data structure corruption;
2598 * since the upl is deallocated regardless of success or failure,
2599 * there's really nothing to do about this other than panic.
2600 *
2601 * IMPORTANT: Use of this function should not be mixed with use of
2602 * ubc_upl_commit_range(), due to the unconditional deallocation
2603 * by this function.
2604 */
0b4e3aa0
A
2605kern_return_t
2606ubc_upl_commit(
0a7de745 2607 upl_t upl)
0b4e3aa0 2608{
0a7de745
A
2609 upl_page_info_t *pl;
2610 kern_return_t kr;
0b4e3aa0
A
2611
2612 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
fe8ab488 2613 kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
0b4e3aa0
A
2614 upl_deallocate(upl);
2615 return kr;
1c79356b
A
2616}
2617
0b4e3aa0 2618
2d21ac55
A
2619/*
2620 * ubc_upl_commit
2621 *
2622 * Commit the contents of the specified range of the upl to the backing store
2623 *
2624 * Parameters: upl The upl to commit
2625 * offset The offset into the upl
2626 * size The size of the region to be committed,
2627 * starting at the specified offset
2628 * flags commit type (see below)
2629 *
2630 * Returns: KERN_SUCCESS The range has been committed
2631 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2632 * KERN_FAILURE The supplied upl does not represent
2633 * device memory, and the offset plus the
2634 * size would exceed the actual size of
2635 * the upl
2636 *
2637 * Notes: IMPORTANT: If the commit is successful, and the object is now
2638 * empty, the upl will be deallocated. Since the caller cannot
2639 * check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2640 * should generally only be used when the offset is 0 and the size
2641 * is equal to the upl size.
2642 *
2643 * The flags argument is a bitmap of flags on the rage of pages in
2644 * the upl to be committed; allowable flags are:
2645 *
2646 * o UPL_COMMIT_FREE_ON_EMPTY Free the upl when it is
2647 * both empty and has been
2648 * successfully committed
2649 * o UPL_COMMIT_CLEAR_DIRTY Clear each pages dirty
2650 * bit; will prevent a
2651 * later pageout
2652 * o UPL_COMMIT_SET_DIRTY Set each pages dirty
2653 * bit; will cause a later
2654 * pageout
2655 * o UPL_COMMIT_INACTIVATE Clear each pages
2656 * reference bit; the page
2657 * will not be accessed
2658 * o UPL_COMMIT_ALLOW_ACCESS Unbusy each page; pages
2659 * become busy when an
2660 * IOMemoryDescriptor is
2661 * mapped or redirected,
2662 * and we have to wait for
2663 * an IOKit driver
2664 *
2665 * The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2666 * not be specified by the caller.
2667 *
2668 * The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2669 * mutually exclusive, and should not be combined.
2670 */
0b4e3aa0
A
2671kern_return_t
2672ubc_upl_commit_range(
0a7de745
A
2673 upl_t upl,
2674 upl_offset_t offset,
2675 upl_size_t size,
2676 int flags)
0b4e3aa0 2677{
0a7de745
A
2678 upl_page_info_t *pl;
2679 boolean_t empty;
2680 kern_return_t kr;
0b4e3aa0 2681
0a7de745 2682 if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
0b4e3aa0 2683 flags |= UPL_COMMIT_NOTIFY_EMPTY;
0a7de745 2684 }
0b4e3aa0 2685
593a1d5f
A
2686 if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2687 return KERN_INVALID_ARGUMENT;
2688 }
2689
0b4e3aa0
A
2690 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2691
2692 kr = upl_commit_range(upl, offset, size, flags,
0a7de745 2693 pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
0b4e3aa0 2694
0a7de745 2695 if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
0b4e3aa0 2696 upl_deallocate(upl);
0a7de745 2697 }
0b4e3aa0
A
2698
2699 return kr;
2700}
2d21ac55
A
2701
2702
2703/*
2704 * ubc_upl_abort_range
2705 *
2706 * Abort the contents of the specified range of the specified upl
2707 *
2708 * Parameters: upl The upl to abort
2709 * offset The offset into the upl
2710 * size The size of the region to be aborted,
2711 * starting at the specified offset
2712 * abort_flags abort type (see below)
2713 *
2714 * Returns: KERN_SUCCESS The range has been aborted
2715 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2716 * KERN_FAILURE The supplied upl does not represent
2717 * device memory, and the offset plus the
2718 * size would exceed the actual size of
2719 * the upl
2720 *
2721 * Notes: IMPORTANT: If the abort is successful, and the object is now
2722 * empty, the upl will be deallocated. Since the caller cannot
2723 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2724 * should generally only be used when the offset is 0 and the size
2725 * is equal to the upl size.
2726 *
2727 * The abort_flags argument is a bitmap of flags on the range of
2728 * pages in the upl to be aborted; allowable flags are:
2729 *
2730 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2731 * empty and has been successfully
2732 * aborted
2733 * o UPL_ABORT_RESTART The operation must be restarted
2734 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2735 * o UPL_ABORT_ERROR An I/O error occurred
2736 * o UPL_ABORT_DUMP_PAGES Just free the pages
2737 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2738 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2739 *
2740 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2741 * not be specified by the caller. It is intended to fulfill the
2742 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2743 * ubc_upl_commit_range(), but is never referenced internally.
2744 *
2745 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2746 * referenced; do not use it.
2747 */
0b4e3aa0
A
2748kern_return_t
2749ubc_upl_abort_range(
0a7de745
A
2750 upl_t upl,
2751 upl_offset_t offset,
2752 upl_size_t size,
2753 int abort_flags)
0b4e3aa0 2754{
0a7de745
A
2755 kern_return_t kr;
2756 boolean_t empty = FALSE;
0b4e3aa0 2757
0a7de745 2758 if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
0b4e3aa0 2759 abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
0a7de745 2760 }
0b4e3aa0
A
2761
2762 kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2763
0a7de745 2764 if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
0b4e3aa0 2765 upl_deallocate(upl);
0a7de745 2766 }
0b4e3aa0
A
2767
2768 return kr;
2769}
2770
2d21ac55
A
2771
2772/*
2773 * ubc_upl_abort
2774 *
2775 * Abort the contents of the specified upl
2776 *
2777 * Parameters: upl The upl to abort
2778 * abort_type abort type (see below)
2779 *
2780 * Returns: KERN_SUCCESS The range has been aborted
2781 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2782 * KERN_FAILURE The supplied upl does not represent
2783 * device memory, and the offset plus the
2784 * size would exceed the actual size of
2785 * the upl
2786 *
2787 * Notes: IMPORTANT: If the abort is successful, and the object is now
2788 * empty, the upl will be deallocated. Since the caller cannot
2789 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2790 * should generally only be used when the offset is 0 and the size
2791 * is equal to the upl size.
2792 *
2793 * The abort_type is a bitmap of flags on the range of
2794 * pages in the upl to be aborted; allowable flags are:
2795 *
2796 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2797 * empty and has been successfully
2798 * aborted
2799 * o UPL_ABORT_RESTART The operation must be restarted
2800 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2801 * o UPL_ABORT_ERROR An I/O error occurred
2802 * o UPL_ABORT_DUMP_PAGES Just free the pages
2803 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2804 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2805 *
2806 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2807 * not be specified by the caller. It is intended to fulfill the
2808 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2809 * ubc_upl_commit_range(), but is never referenced internally.
2810 *
2811 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2812 * referenced; do not use it.
2813 */
0b4e3aa0
A
2814kern_return_t
2815ubc_upl_abort(
0a7de745
A
2816 upl_t upl,
2817 int abort_type)
0b4e3aa0 2818{
0a7de745 2819 kern_return_t kr;
0b4e3aa0
A
2820
2821 kr = upl_abort(upl, abort_type);
2822 upl_deallocate(upl);
2823 return kr;
2824}
2825
2d21ac55
A
2826
2827/*
2828 * ubc_upl_pageinfo
2829 *
2830 * Retrieve the internal page list for the specified upl
2831 *
2832 * Parameters: upl The upl to obtain the page list from
2833 *
2834 * Returns: !NULL The (upl_page_info_t *) for the page
2835 * list internal to the upl
2836 * NULL Error/no page list associated
2837 *
2838 * Notes: IMPORTANT: The function is only valid on internal objects
2839 * where the list request was made with the UPL_INTERNAL flag.
2840 *
2841 * This function is a utility helper function, since some callers
2842 * may not have direct access to the header defining the macro,
2843 * due to abstraction layering constraints.
2844 */
0b4e3aa0
A
2845upl_page_info_t *
2846ubc_upl_pageinfo(
0a7de745
A
2847 upl_t upl)
2848{
2849 return UPL_GET_INTERNAL_PAGE_LIST(upl);
0b4e3aa0 2850}
91447636 2851
91447636 2852
0a7de745 2853int
fe8ab488 2854UBCINFOEXISTS(const struct vnode * vp)
91447636 2855{
0a7de745 2856 return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
91447636
A
2857}
2858
2d21ac55 2859
316670eb
A
2860void
2861ubc_upl_range_needed(
0a7de745
A
2862 upl_t upl,
2863 int index,
2864 int count)
316670eb
A
2865{
2866 upl_range_needed(upl, index, count);
2867}
2868
0a7de745
A
2869boolean_t
2870ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
fe8ab488 2871{
0a7de745 2872 if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
fe8ab488 2873 return FALSE;
0a7de745
A
2874 }
2875 if (writable) {
fe8ab488 2876 *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
0a7de745 2877 }
fe8ab488
A
2878 return TRUE;
2879}
2880
0a7de745
A
2881boolean_t
2882ubc_is_mapped_writable(const struct vnode *vp)
fe8ab488
A
2883{
2884 boolean_t writable;
2885 return ubc_is_mapped(vp, &writable) && writable;
2886}
2887
316670eb 2888
2d21ac55
A
2889/*
2890 * CODE SIGNING
2891 */
f427ee49
A
2892static atomic_size_t cs_blob_size = 0;
2893static atomic_uint_fast32_t cs_blob_count = 0;
2894static atomic_size_t cs_blob_size_peak = 0;
2895static atomic_size_t cs_blob_size_max = 0;
2896static atomic_uint_fast32_t cs_blob_count_peak = 0;
2897
2898SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count, 0, "Current number of code signature blobs");
2899SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size, "Current size of all code signature blobs");
2900SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
2901SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, "Peak size of code signature blobs");
2902SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, "Size of biggest code signature blob");
2d21ac55 2903
3e170ce0
A
2904/*
2905 * Function: csblob_parse_teamid
2906 *
2907 * Description: This function returns a pointer to the team id
0a7de745
A
2908 * stored within the codedirectory of the csblob.
2909 * If the codedirectory predates team-ids, it returns
2910 * NULL.
2911 * This does not copy the name but returns a pointer to
2912 * it within the CD. Subsequently, the CD must be
2913 * available when this is used.
2914 */
3e170ce0
A
2915
2916static const char *
2917csblob_parse_teamid(struct cs_blob *csblob)
2918{
2919 const CS_CodeDirectory *cd;
2920
490019cf 2921 cd = csblob->csb_cd;
3e170ce0 2922
0a7de745 2923 if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
3e170ce0 2924 return NULL;
0a7de745 2925 }
3e170ce0 2926
0a7de745 2927 if (cd->teamOffset == 0) {
3e170ce0 2928 return NULL;
0a7de745 2929 }
3e170ce0
A
2930
2931 const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
0a7de745 2932 if (cs_debug > 1) {
3e170ce0 2933 printf("found team-id %s in cdblob\n", name);
0a7de745 2934 }
3e170ce0
A
2935
2936 return name;
2937}
2938
39236c6e 2939
593a1d5f
A
2940kern_return_t
2941ubc_cs_blob_allocate(
0a7de745
A
2942 vm_offset_t *blob_addr_p,
2943 vm_size_t *blob_size_p)
593a1d5f 2944{
0a7de745 2945 kern_return_t kr = KERN_FAILURE;
593a1d5f 2946
d9a64523
A
2947 {
2948 *blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);
2949
2950 if (*blob_addr_p == 0) {
2951 kr = KERN_NO_SPACE;
2952 } else {
2953 kr = KERN_SUCCESS;
2954 }
593a1d5f 2955 }
d9a64523 2956
593a1d5f
A
2957 return kr;
2958}
2959
2960void
2961ubc_cs_blob_deallocate(
0a7de745
A
2962 vm_offset_t blob_addr,
2963 vm_size_t blob_size)
593a1d5f 2964{
d9a64523 2965 {
0a7de745 2966 kfree(blob_addr, blob_size);
d9a64523 2967 }
39037602
A
2968}
2969
2970/*
2971 * Some codesigned files use a lowest common denominator page size of
2972 * 4KiB, but can be used on systems that have a runtime page size of
2973 * 16KiB. Since faults will only occur on 16KiB ranges in
2974 * cs_validate_range(), we can convert the original Code Directory to
2975 * a multi-level scheme where groups of 4 hashes are combined to form
2976 * a new hash, which represents 16KiB in the on-disk file. This can
2977 * reduce the wired memory requirement for the Code Directory by
2978 * 75%. Care must be taken for binaries that use the "fourk" VM pager
2979 * for unaligned access, which may still attempt to validate on
2980 * non-16KiB multiples for compatibility with 3rd party binaries.
2981 */
2982static boolean_t
f427ee49 2983ubc_cs_supports_multilevel_hash(struct cs_blob *blob __unused)
39037602
A
2984{
2985 const CS_CodeDirectory *cd;
2986
0a7de745 2987
39037602
A
2988 /*
2989 * Only applies to binaries that ship as part of the OS,
2990 * primarily the shared cache.
2991 */
2992 if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
2993 return FALSE;
2994 }
2995
2996 /*
2997 * If the runtime page size matches the code signing page
2998 * size, there is no work to do.
2999 */
3000 if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
3001 return FALSE;
3002 }
3003
3004 cd = blob->csb_cd;
3005
3006 /*
3007 * There must be a valid integral multiple of hashes
3008 */
3009 if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3010 return FALSE;
3011 }
3012
3013 /*
3014 * Scatter lists must also have ranges that have an integral number of hashes
3015 */
3016 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
39037602 3017 const SC_Scatter *scatter = (const SC_Scatter*)
0a7de745 3018 ((const char*)cd + ntohl(cd->scatterOffset));
39037602
A
3019 /* iterate all scatter structs to make sure they are all aligned */
3020 do {
3021 uint32_t sbase = ntohl(scatter->base);
3022 uint32_t scount = ntohl(scatter->count);
3023
3024 /* last scatter? */
3025 if (scount == 0) {
3026 break;
3027 }
3028
3029 if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3030 return FALSE;
3031 }
3032
3033 if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3034 return FALSE;
3035 }
3036
3037 scatter++;
0a7de745 3038 } while (1);
39037602
A
3039 }
3040
3041 /* Covered range must be a multiple of the new page size */
3042 if (ntohl(cd->codeLimit) & PAGE_MASK) {
3043 return FALSE;
3044 }
3045
3046 /* All checks pass */
3047 return TRUE;
3048}
3049
3050/*
d9a64523
A
3051 * Given a cs_blob with an already chosen best code directory, this
3052 * function allocates memory and copies into it only the blobs that
3053 * will be needed by the kernel, namely the single chosen code
3054 * directory (and not any of its alternatives) and the entitlement
3055 * blob.
3056 *
3057 * This saves significant memory with agile signatures, and additional
3058 * memory for 3rd Party Code because we also omit the CMS blob.
3059 *
3060 * To support multilevel and other potential code directory rewriting,
3061 * the size of a new code directory can be specified. Since that code
3062 * directory will replace the existing code directory,
3063 * ubc_cs_reconstitute_code_signature does not copy the original code
3064 * directory when a size is given, and the caller must fill it in.
39037602 3065 */
d9a64523
A
3066static int
3067ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size,
0a7de745
A
3068 vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p,
3069 CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p)
39037602 3070{
0a7de745
A
3071 const CS_CodeDirectory *old_cd, *cd;
3072 CS_CodeDirectory *new_cd;
39037602
A
3073 const CS_GenericBlob *entitlements;
3074 vm_offset_t new_blob_addr;
3075 vm_size_t new_blob_size;
3076 vm_size_t new_cdsize;
0a7de745
A
3077 kern_return_t kr;
3078 int error;
39037602
A
3079
3080 old_cd = blob->csb_cd;
3081
d9a64523 3082 new_cdsize = optional_new_cd_size != 0 ? optional_new_cd_size : htonl(old_cd->length);
39037602
A
3083
3084 new_blob_size = sizeof(CS_SuperBlob);
3085 new_blob_size += sizeof(CS_BlobIndex);
3086 new_blob_size += new_cdsize;
3087
3088 if (blob->csb_entitlements_blob) {
3089 /* We need to add a slot for the entitlements */
c3c9b80d
A
3090 ptrauth_utils_auth_blob_generic(blob->csb_entitlements_blob,
3091 ntohl(blob->csb_entitlements_blob->length),
3092 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
3093 PTRAUTH_ADDR_DIVERSIFY,
3094 blob->csb_entitlements_blob_signature);
3095
39037602
A
3096 new_blob_size += sizeof(CS_BlobIndex);
3097 new_blob_size += ntohl(blob->csb_entitlements_blob->length);
3098 }
3099
3100 kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
3101 if (kr != KERN_SUCCESS) {
3102 if (cs_debug > 1) {
3103 printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
0a7de745 3104 kr);
39037602 3105 }
d9a64523 3106 return ENOMEM;
39037602
A
3107 }
3108
0a7de745 3109 CS_SuperBlob *new_superblob;
39037602
A
3110
3111 new_superblob = (CS_SuperBlob *)new_blob_addr;
3112 new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
3113 new_superblob->length = htonl((uint32_t)new_blob_size);
3114 if (blob->csb_entitlements_blob) {
0a7de745 3115 vm_size_t ent_offset, cd_offset;
39037602
A
3116
3117 cd_offset = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
3118 ent_offset = cd_offset + new_cdsize;
3119
3120 new_superblob->count = htonl(2);
3121 new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
3122 new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
3123 new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
3124 new_superblob->index[1].offset = htonl((uint32_t)ent_offset);
3125
c3c9b80d
A
3126 ptrauth_utils_auth_blob_generic(blob->csb_entitlements_blob,
3127 ntohl(blob->csb_entitlements_blob->length),
3128 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
3129 PTRAUTH_ADDR_DIVERSIFY,
3130 blob->csb_entitlements_blob_signature);
3131
39037602
A
3132 memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));
3133
3134 new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
3135 } else {
d9a64523
A
3136 // Blob is the code directory, directly.
3137 new_cd = (CS_CodeDirectory *)new_blob_addr;
3138 }
39037602 3139
d9a64523
A
3140 if (optional_new_cd_size == 0) {
3141 // Copy code directory, and revalidate.
3142 memcpy(new_cd, old_cd, new_cdsize);
39037602 3143
d9a64523 3144 vm_size_t length = new_blob_size;
39037602 3145
d9a64523
A
3146 error = cs_validate_csblob((const uint8_t *)new_blob_addr, length, &cd, &entitlements);
3147
3148 if (error) {
3149 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
0a7de745 3150 error);
d9a64523
A
3151
3152 ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3153 return error;
3154 }
3155 *new_entitlements_p = entitlements;
3156 } else {
3157 // Caller will fill out and validate code directory.
3158 memset(new_cd, 0, new_cdsize);
3159 *new_entitlements_p = NULL;
3160 }
3161
3162 *new_blob_addr_p = new_blob_addr;
3163 *new_blob_size_p = new_blob_size;
3164 *new_cd_p = new_cd;
3165
3166 return 0;
3167}
3168
3169static int
3170ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3171{
0a7de745
A
3172 const CS_CodeDirectory *old_cd, *cd;
3173 CS_CodeDirectory *new_cd;
d9a64523
A
3174 const CS_GenericBlob *entitlements;
3175 vm_offset_t new_blob_addr;
3176 vm_size_t new_blob_size;
3177 vm_size_t new_cdsize;
0a7de745 3178 int error;
d9a64523 3179
0a7de745 3180 uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
d9a64523
A
3181
3182 if (cs_debug > 1) {
3183 printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
0a7de745 3184 (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
d9a64523
A
3185 }
3186
3187 old_cd = blob->csb_cd;
3188
3189 /* Up to the hashes, we can copy all data */
3190 new_cdsize = ntohl(old_cd->hashOffset);
3191 new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
3192
3193 error = ubc_cs_reconstitute_code_signature(blob, new_cdsize,
0a7de745
A
3194 &new_blob_addr, &new_blob_size, &new_cd,
3195 &entitlements);
d9a64523
A
3196 if (error != 0) {
3197 printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
3198 return error;
39037602
A
3199 }
3200
3201 memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3202
3203 /* Update fields in the Code Directory structure */
3204 new_cd->length = htonl((uint32_t)new_cdsize);
3205
3206 uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3207 nCodeSlots >>= hashes_per_new_hash_shift;
3208 new_cd->nCodeSlots = htonl(nCodeSlots);
3209
f427ee49 3210 new_cd->pageSize = (uint8_t)PAGE_SHIFT; /* Not byte-swapped */
39037602
A
3211
3212 if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3213 SC_Scatter *scatter = (SC_Scatter*)
0a7de745 3214 ((char *)new_cd + ntohl(new_cd->scatterOffset));
39037602
A
3215 /* iterate all scatter structs to scale their counts */
3216 do {
3217 uint32_t scount = ntohl(scatter->count);
3218 uint32_t sbase = ntohl(scatter->base);
3219
3220 /* last scatter? */
3221 if (scount == 0) {
3222 break;
3223 }
3224
3225 scount >>= hashes_per_new_hash_shift;
3226 scatter->count = htonl(scount);
3227
3228 sbase >>= hashes_per_new_hash_shift;
3229 scatter->base = htonl(sbase);
3230
3231 scatter++;
0a7de745 3232 } while (1);
39037602
A
3233 }
3234
3235 /* For each group of hashes, hash them together */
3236 const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3237 unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3238
3239 uint32_t hash_index;
3240 for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
0a7de745 3241 union cs_hash_union mdctx;
39037602
A
3242
3243 uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3244 const unsigned char *src = src_base + hash_index * source_hash_len;
3245 unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3246
3247 blob->csb_hashtype->cs_init(&mdctx);
3248 blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3249 blob->csb_hashtype->cs_final(dst, &mdctx);
3250 }
3251
d9a64523
A
3252 error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
3253 if (error != 0) {
d9a64523 3254 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
0a7de745 3255 error);
39037602
A
3256
3257 ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
d9a64523 3258 return error;
39037602
A
3259 }
3260
0a7de745 3261 /* New Code Directory is ready for use, swap it out in the blob structure */
c3c9b80d 3262 ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
39037602
A
3263
3264 blob->csb_mem_size = new_blob_size;
c3c9b80d 3265 blob->csb_mem_kaddr = (void *)new_blob_addr;
39037602
A
3266 blob->csb_cd = cd;
3267 blob->csb_entitlements_blob = entitlements;
c3c9b80d
A
3268 if (blob->csb_entitlements_blob != NULL) {
3269 blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
3270 ntohl(blob->csb_entitlements_blob->length),
3271 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
3272 PTRAUTH_ADDR_DIVERSIFY);
3273 }
39037602
A
3274
3275 /* The blob has some cached attributes of the Code Directory, so update those */
3276
f427ee49 3277 blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */
39037602 3278
39037602
A
3279 blob->csb_hash_pageshift = PAGE_SHIFT;
3280 blob->csb_end_offset = ntohl(cd->codeLimit);
0a7de745 3281 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
39037602 3282 const SC_Scatter *scatter = (const SC_Scatter*)
0a7de745 3283 ((const char*)cd + ntohl(cd->scatterOffset));
39037602
A
3284 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3285 } else {
3286 blob->csb_start_offset = 0;
3287 }
d9a64523
A
3288
3289 return 0;
593a1d5f 3290}
39236c6e 3291
d9a64523
A
3292/*
3293 * Validate the code signature blob, create a struct cs_blob wrapper
3294 * and return it together with a pointer to the chosen code directory
3295 * and entitlements blob.
3296 *
3297 * Note that this takes ownership of the memory as addr, mainly because
3298 * this function can actually replace the passed in blob with another
3299 * one, e.g. when performing multilevel hashing optimization.
3300 */
2d21ac55 3301int
d9a64523
A
3302cs_blob_create_validated(
3303 vm_address_t * const addr,
3304 vm_size_t size,
3305 struct cs_blob ** const ret_blob,
0a7de745 3306 CS_CodeDirectory const ** const ret_cd)
91447636 3307{
0a7de745
A
3308 struct cs_blob *blob;
3309 int error = EINVAL;
2d21ac55 3310 const CS_CodeDirectory *cd;
39037602 3311 const CS_GenericBlob *entitlements;
0a7de745
A
3312 union cs_hash_union mdctx;
3313 size_t length;
15129b1c 3314
0a7de745
A
3315 if (ret_blob) {
3316 *ret_blob = NULL;
3317 }
2d21ac55 3318
0a7de745 3319 blob = (struct cs_blob *) kalloc(sizeof(struct cs_blob));
2d21ac55
A
3320 if (blob == NULL) {
3321 return ENOMEM;
3322 }
3323
2d21ac55 3324 /* fill in the new blob */
2d21ac55
A
3325 blob->csb_mem_size = size;
3326 blob->csb_mem_offset = 0;
c3c9b80d 3327 blob->csb_mem_kaddr = (void *)*addr;
39236c6e 3328 blob->csb_flags = 0;
5ba3f43e 3329 blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
fe8ab488 3330 blob->csb_platform_binary = 0;
3e170ce0 3331 blob->csb_platform_path = 0;
fe8ab488 3332 blob->csb_teamid = NULL;
f427ee49
A
3333#if CONFIG_SUPPLEMENTAL_SIGNATURES
3334 blob->csb_supplement_teamid = NULL;
3335#endif
39037602
A
3336 blob->csb_entitlements_blob = NULL;
3337 blob->csb_entitlements = NULL;
d9a64523
A
3338 blob->csb_reconstituted = false;
3339
39037602
A
3340 /* Transfer ownership. Even on error, this function will deallocate */
3341 *addr = 0;
3342
2d21ac55
A
3343 /*
3344 * Validate the blob's contents
3345 */
813fb2f6
A
3346 length = (size_t) size;
3347 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
0a7de745 3348 length, &cd, &entitlements);
39236c6e 3349 if (error) {
0a7de745 3350 if (cs_debug) {
39236c6e 3351 printf("CODESIGNING: csblob invalid: %d\n", error);
0a7de745 3352 }
813fb2f6
A
3353 /*
3354 * The vnode checker can't make the rest of this function
3355 * succeed if csblob validation failed, so bail */
3356 goto out;
2d21ac55 3357 } else {
3e170ce0
A
3358 const unsigned char *md_base;
3359 uint8_t hash[CS_HASH_MAX_SIZE];
3360 int md_size;
f427ee49 3361 vm_offset_t hash_pagemask;
3e170ce0 3362
490019cf 3363 blob->csb_cd = cd;
39037602 3364 blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
c3c9b80d
A
3365 if (blob->csb_entitlements_blob != NULL) {
3366 blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
3367 ntohl(blob->csb_entitlements_blob->length),
3368 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
3369 PTRAUTH_ADDR_DIVERSIFY);
3370 }
3e170ce0 3371 blob->csb_hashtype = cs_find_md(cd->hashType);
0a7de745 3372 if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
3e170ce0 3373 panic("validated CodeDirectory but unsupported type");
0a7de745 3374 }
39037602
A
3375
3376 blob->csb_hash_pageshift = cd->pageSize;
f427ee49
A
3377 hash_pagemask = (1U << cd->pageSize) - 1;
3378 blob->csb_hash_firstlevel_pageshift = 0;
39236c6e 3379 blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
f427ee49 3380 blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask);
0a7de745 3381 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
39236c6e 3382 const SC_Scatter *scatter = (const SC_Scatter*)
0a7de745 3383 ((const char*)cd + ntohl(cd->scatterOffset));
f427ee49 3384 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift);
b0d623f7 3385 } else {
3e170ce0 3386 blob->csb_start_offset = 0;
b0d623f7 3387 }
3e170ce0
A
3388 /* compute the blob's cdhash */
3389 md_base = (const unsigned char *) cd;
3390 md_size = ntohl(cd->length);
3391
3392 blob->csb_hashtype->cs_init(&mdctx);
3393 blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
3394 blob->csb_hashtype->cs_final(hash, &mdctx);
3395
3396 memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
f427ee49
A
3397 blob->csb_cdhash_signature = ptrauth_utils_sign_blob_generic(blob->csb_cdhash,
3398 sizeof(blob->csb_cdhash),
3399 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
3400 PTRAUTH_ADDR_DIVERSIFY);
3401
3402#if CONFIG_SUPPLEMENTAL_SIGNATURES
3403 blob->csb_linkage_hashtype = NULL;
3404 if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 &&
3405 ntohl(cd->linkageSize) >= CS_CDHASH_LEN) {
3406 blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType);
3407
3408 if (blob->csb_linkage_hashtype != NULL) {
3409 memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset),
3410 CS_CDHASH_LEN);
3411 }
3412 }
3413#endif
2d21ac55
A
3414 }
3415
0a7de745 3416 error = 0;
d9a64523
A
3417
3418out:
0a7de745
A
3419 if (error != 0) {
3420 cs_blob_free(blob);
3421 blob = NULL;
3422 cd = NULL;
3423 }
3424
3425 if (ret_blob != NULL) {
3426 *ret_blob = blob;
3427 }
3428 if (ret_cd != NULL) {
3429 *ret_cd = cd;
3430 }
3431
3432 return error;
d9a64523
A
3433}
3434
3435/*
3436 * Free a cs_blob previously created by cs_blob_create_validated.
3437 */
3438void
3439cs_blob_free(
0a7de745 3440 struct cs_blob * const blob)
d9a64523 3441{
0a7de745
A
3442 if (blob != NULL) {
3443 if (blob->csb_mem_kaddr) {
c3c9b80d
A
3444 ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3445 blob->csb_mem_kaddr = NULL;
0a7de745
A
3446 }
3447 if (blob->csb_entitlements != NULL) {
3448 osobject_release(blob->csb_entitlements);
3449 blob->csb_entitlements = NULL;
3450 }
3451 (kfree)(blob, sizeof(*blob));
3452 }
d9a64523 3453}
f427ee49
A
3454#if CONFIG_SUPPLEMENTAL_SIGNATURES
3455static void
3456cs_blob_supplement_free(struct cs_blob * const blob)
3457{
3458 if (blob != NULL) {
3459 if (blob->csb_supplement_teamid != NULL) {
3460 vm_size_t teamid_size = strlen(blob->csb_supplement_teamid) + 1;
3461 kfree(blob->csb_supplement_teamid, teamid_size);
3462 blob->csb_supplement_teamid = NULL;
3463 }
3464 cs_blob_free(blob);
3465 }
3466}
3467#endif
3468
3469static void
3470ubc_cs_blob_adjust_statistics(struct cs_blob const *blob)
3471{
3472 /* Note that the atomic ops are not enough to guarantee
3473 * correctness: If a blob with an intermediate size is inserted
3474 * concurrently, we can lose a peak value assignment. But these
3475 * statistics are only advisory anyway, so we're not going to
3476 * employ full locking here. (Consequently, we are also okay with
3477 * relaxed ordering of those accesses.)
3478 */
3479
3480 unsigned int new_cs_blob_count = os_atomic_add(&cs_blob_count, 1, relaxed);
3481 if (new_cs_blob_count > os_atomic_load(&cs_blob_count_peak, relaxed)) {
3482 os_atomic_store(&cs_blob_count_peak, new_cs_blob_count, relaxed);
3483 }
3484
3485 size_t new_cs_blob_size = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed);
3486
3487 if (new_cs_blob_size > os_atomic_load(&cs_blob_size_peak, relaxed)) {
3488 os_atomic_store(&cs_blob_size_peak, new_cs_blob_size, relaxed);
3489 }
3490 if (blob->csb_mem_size > os_atomic_load(&cs_blob_size_max, relaxed)) {
3491 os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed);
3492 }
3493}
d9a64523
A
3494
3495int
3496ubc_cs_blob_add(
0a7de745 3497 struct vnode *vp,
f427ee49 3498 uint32_t platform,
0a7de745 3499 cpu_type_t cputype,
f427ee49 3500 cpu_subtype_t cpusubtype,
0a7de745
A
3501 off_t base_offset,
3502 vm_address_t *addr,
3503 vm_size_t size,
d9a64523 3504 struct image_params *imgp,
0a7de745
A
3505 __unused int flags,
3506 struct cs_blob **ret_blob)
d9a64523 3507{
0a7de745
A
3508 kern_return_t kr;
3509 struct ubc_info *uip;
f427ee49 3510 struct cs_blob *blob = NULL, *oblob = NULL;
0a7de745 3511 int error;
d9a64523 3512 CS_CodeDirectory const *cd;
0a7de745
A
3513 off_t blob_start_offset, blob_end_offset;
3514 boolean_t record_mtime;
d9a64523
A
3515
3516 record_mtime = FALSE;
0a7de745
A
3517 if (ret_blob) {
3518 *ret_blob = NULL;
3519 }
3520
3521 /* Create the struct cs_blob wrapper that will be attached to the vnode.
3522 * Validates the passed in blob in the process. */
3523 error = cs_blob_create_validated(addr, size, &blob, &cd);
3524
3525 if (error != 0) {
d9a64523 3526 printf("malform code signature blob: %d\n", error);
0a7de745
A
3527 return error;
3528 }
d9a64523 3529
0a7de745 3530 blob->csb_cpu_type = cputype;
f427ee49 3531 blob->csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK;
d9a64523
A
3532 blob->csb_base_offset = base_offset;
3533
3534 /*
593a1d5f
A
3535 * Let policy module check whether the blob's signature is accepted.
3536 */
3537#if CONFIG_MACF
0a7de745 3538 unsigned int cs_flags = blob->csb_flags;
5ba3f43e 3539 unsigned int signer_type = blob->csb_signer_type;
f427ee49 3540 error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
0a7de745 3541 blob->csb_flags = cs_flags;
5ba3f43e 3542 blob->csb_signer_type = signer_type;
39037602 3543
fe8ab488 3544 if (error) {
0a7de745 3545 if (cs_debug) {
fe8ab488 3546 printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
0a7de745 3547 }
593a1d5f 3548 goto out;
fe8ab488 3549 }
39037602 3550 if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
0a7de745 3551 if (cs_debug) {
c18c124e 3552 printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
0a7de745 3553 }
c18c124e
A
3554 error = EPERM;
3555 goto out;
3556 }
5ba3f43e
A
3557#endif
3558
d9a64523
A
3559#if CONFIG_ENFORCE_SIGNED_CODE
3560 /*
3561 * Reconstitute code signature
3562 */
3563 {
3564 vm_address_t new_mem_kaddr = 0;
3565 vm_size_t new_mem_size = 0;
3566
3567 CS_CodeDirectory *new_cd = NULL;
3568 CS_GenericBlob const *new_entitlements = NULL;
3569
3570 error = ubc_cs_reconstitute_code_signature(blob, 0,
0a7de745
A
3571 &new_mem_kaddr, &new_mem_size,
3572 &new_cd, &new_entitlements);
d9a64523
A
3573
3574 if (error != 0) {
3575 printf("failed code signature reconstitution: %d\n", error);
3576 goto out;
3577 }
3578
c3c9b80d 3579 ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
d9a64523 3580
c3c9b80d 3581 blob->csb_mem_kaddr = (void *)new_mem_kaddr;
d9a64523
A
3582 blob->csb_mem_size = new_mem_size;
3583 blob->csb_cd = new_cd;
3584 blob->csb_entitlements_blob = new_entitlements;
c3c9b80d
A
3585 if (blob->csb_entitlements_blob != NULL) {
3586 blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
3587 ntohl(blob->csb_entitlements_blob->length),
3588 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
3589 PTRAUTH_ADDR_DIVERSIFY);
3590 }
d9a64523
A
3591 blob->csb_reconstituted = true;
3592 }
d9a64523
A
3593#endif
3594
3595
39037602 3596 if (blob->csb_flags & CS_PLATFORM_BINARY) {
0a7de745 3597 if (cs_debug > 1) {
fe8ab488 3598 printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
0a7de745 3599 }
fe8ab488 3600 blob->csb_platform_binary = 1;
39037602 3601 blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
fe8ab488
A
3602 } else {
3603 blob->csb_platform_binary = 0;
3e170ce0
A
3604 blob->csb_platform_path = 0;
3605 blob->csb_teamid = csblob_parse_teamid(blob);
fe8ab488 3606 if (cs_debug > 1) {
0a7de745 3607 if (blob->csb_teamid) {
fe8ab488 3608 printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
0a7de745 3609 } else {
fe8ab488 3610 printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
0a7de745 3611 }
fe8ab488
A
3612 }
3613 }
39037602 3614
2d21ac55
A
3615 /*
3616 * Validate the blob's coverage
3617 */
3618 blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
3619 blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
3620
cf7d32b8
A
3621 if (blob_start_offset >= blob_end_offset ||
3622 blob_start_offset < 0 ||
3623 blob_end_offset <= 0) {
2d21ac55
A
3624 /* reject empty or backwards blob */
3625 error = EINVAL;
3626 goto out;
3627 }
3628
39037602 3629 if (ubc_cs_supports_multilevel_hash(blob)) {
d9a64523
A
3630 error = ubc_cs_convert_to_multilevel_hash(blob);
3631 if (error != 0) {
3632 printf("failed multilevel hash conversion: %d\n", error);
3633 goto out;
3634 }
3635 blob->csb_reconstituted = true;
39037602
A
3636 }
3637
2d21ac55 3638 vnode_lock(vp);
0a7de745 3639 if (!UBCINFOEXISTS(vp)) {
2d21ac55
A
3640 vnode_unlock(vp);
3641 error = ENOENT;
3642 goto out;
3643 }
3644 uip = vp->v_ubcinfo;
3645
3646 /* check if this new blob overlaps with an existing blob */
3647 for (oblob = uip->cs_blobs;
0a7de745
A
3648 oblob != NULL;
3649 oblob = oblob->csb_next) {
3650 off_t oblob_start_offset, oblob_end_offset;
3651
3652 if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
3653 vnode_unlock(vp);
3654 error = EALREADY;
3655 goto out;
3656 } else if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
3657 if (!oblob->csb_platform_binary) {
3658 vnode_unlock(vp);
3659 error = EALREADY;
3660 goto out;
3661 }
3662 } else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices
3663 if (oblob->csb_platform_binary ||
fe8ab488
A
3664 oblob->csb_teamid == NULL ||
3665 strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
3666 vnode_unlock(vp);
3667 error = EALREADY;
3668 goto out;
3669 }
0a7de745
A
3670 } else { // non teamid binary needs to be the same for app slices
3671 if (oblob->csb_platform_binary ||
3672 oblob->csb_teamid != NULL) {
fe8ab488
A
3673 vnode_unlock(vp);
3674 error = EALREADY;
3675 goto out;
3676 }
0a7de745 3677 }
2d21ac55 3678
0a7de745
A
3679 oblob_start_offset = (oblob->csb_base_offset +
3680 oblob->csb_start_offset);
3681 oblob_end_offset = (oblob->csb_base_offset +
3682 oblob->csb_end_offset);
3683 if (blob_start_offset >= oblob_end_offset ||
3684 blob_end_offset <= oblob_start_offset) {
3685 /* no conflict with this existing blob */
3686 } else {
3687 /* conflict ! */
3688 if (blob_start_offset == oblob_start_offset &&
3689 blob_end_offset == oblob_end_offset &&
3690 blob->csb_mem_size == oblob->csb_mem_size &&
3691 blob->csb_flags == oblob->csb_flags &&
3692 (blob->csb_cpu_type == CPU_TYPE_ANY ||
3693 oblob->csb_cpu_type == CPU_TYPE_ANY ||
3694 blob->csb_cpu_type == oblob->csb_cpu_type) &&
3695 !bcmp(blob->csb_cdhash,
3696 oblob->csb_cdhash,
3697 CS_CDHASH_LEN)) {
3698 /*
3699 * We already have this blob:
3700 * we'll return success but
3701 * throw away the new blob.
3702 */
3703 if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
3704 /*
3705 * The old blob matches this one
3706 * but doesn't have any CPU type.
3707 * Update it with whatever the caller
3708 * provided this time.
3709 */
3710 oblob->csb_cpu_type = cputype;
3711 }
3712
3713 /* The signature is still accepted, so update the
3714 * generation count. */
3715 uip->cs_add_gen = cs_blob_generation_count;
3716
3717 vnode_unlock(vp);
3718 if (ret_blob) {
3719 *ret_blob = oblob;
3720 }
3721 error = EAGAIN;
3722 goto out;
3723 } else {
3724 /* different blob: reject the new one */
3725 vnode_unlock(vp);
3726 error = EALREADY;
3727 goto out;
3728 }
3729 }
2d21ac55
A
3730 }
3731
fe8ab488 3732
2d21ac55
A
3733 /* mark this vnode's VM object as having "signed pages" */
3734 kr = memory_object_signed(uip->ui_control, TRUE);
3735 if (kr != KERN_SUCCESS) {
3736 vnode_unlock(vp);
3737 error = ENOENT;
3738 goto out;
3739 }
3740
15129b1c
A
3741 if (uip->cs_blobs == NULL) {
3742 /* loading 1st blob: record the file's current "modify time" */
3743 record_mtime = TRUE;
3744 }
3745
fe8ab488
A
3746 /* set the generation count for cs_blobs */
3747 uip->cs_add_gen = cs_blob_generation_count;
3748
2d21ac55
A
3749 /*
3750 * Add this blob to the list of blobs for this vnode.
3751 * We always add at the front of the list and we never remove a
3752 * blob from the list, so ubc_cs_get_blobs() can return whatever
3753 * the top of the list was and that list will remain valid
3754 * while we validate a page, even after we release the vnode's lock.
3755 */
3756 blob->csb_next = uip->cs_blobs;
3757 uip->cs_blobs = blob;
3758
f427ee49 3759 ubc_cs_blob_adjust_statistics(blob);
2d21ac55 3760
c331a0be 3761 if (cs_debug > 1) {
2d21ac55 3762 proc_t p;
39236c6e 3763 const char *name = vnode_getname_printable(vp);
2d21ac55
A
3764 p = current_proc();
3765 printf("CODE SIGNING: proc %d(%s) "
0a7de745
A
3766 "loaded %s signatures for file (%s) "
3767 "range 0x%llx:0x%llx flags 0x%x\n",
3768 p->p_pid, p->p_comm,
3769 blob->csb_cpu_type == -1 ? "detached" : "embedded",
3770 name,
3771 blob->csb_base_offset + blob->csb_start_offset,
3772 blob->csb_base_offset + blob->csb_end_offset,
3773 blob->csb_flags);
39236c6e 3774 vnode_putname_printable(name);
2d21ac55
A
3775 }
3776
2d21ac55
A
3777 vnode_unlock(vp);
3778
15129b1c
A
3779 if (record_mtime) {
3780 vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
3781 }
3782
0a7de745 3783 if (ret_blob) {
3e170ce0 3784 *ret_blob = blob;
0a7de745 3785 }
3e170ce0 3786
0a7de745 3787 error = 0; /* success ! */
2d21ac55
A
3788
3789out:
3790 if (error) {
0a7de745 3791 if (cs_debug) {
fe8ab488 3792 printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);
0a7de745 3793 }
fe8ab488 3794
0a7de745 3795 cs_blob_free(blob);
2d21ac55
A
3796 }
3797
3798 if (error == EAGAIN) {
3799 /*
0a7de745 3800 * See above: error is EAGAIN if we were asked
2d21ac55
A
3801 * to add an existing blob again. We cleaned the new
3802 * blob and we want to return success.
3803 */
3804 error = 0;
2d21ac55
A
3805 }
3806
3807 return error;
91447636
A
3808}
3809
f427ee49
A
3810#if CONFIG_SUPPLEMENTAL_SIGNATURES
3811int
3812ubc_cs_blob_add_supplement(
3813 struct vnode *vp,
3814 struct vnode *orig_vp,
3815 off_t base_offset,
3816 vm_address_t *addr,
3817 vm_size_t size,
3818 struct cs_blob **ret_blob)
3819{
3820 kern_return_t kr;
3821 struct ubc_info *uip, *orig_uip;
3822 int error;
3823 struct cs_blob *blob, *orig_blob;
3824 CS_CodeDirectory const *cd;
3825 off_t blob_start_offset, blob_end_offset;
3826
3827 if (ret_blob) {
3828 *ret_blob = NULL;
3829 }
3830
3831 /* Create the struct cs_blob wrapper that will be attached to the vnode.
3832 * Validates the passed in blob in the process. */
3833 error = cs_blob_create_validated(addr, size, &blob, &cd);
3834
3835 if (error != 0) {
3836 printf("malformed code signature supplement blob: %d\n", error);
3837 return error;
3838 }
3839
3840 blob->csb_cpu_type = -1;
3841 blob->csb_base_offset = base_offset;
3842
3843 blob->csb_reconstituted = false;
3844
3845 vnode_lock(orig_vp);
3846 if (!UBCINFOEXISTS(orig_vp)) {
3847 vnode_unlock(orig_vp);
3848 error = ENOENT;
3849 goto out;
3850 }
3851
3852 orig_uip = orig_vp->v_ubcinfo;
3853
3854 /* check that the supplement's linked cdhash matches a cdhash of
3855 * the target image.
3856 */
3857
3858 if (blob->csb_linkage_hashtype == NULL) {
3859 proc_t p;
3860 const char *iname = vnode_getname_printable(vp);
3861 p = current_proc();
3862
3863 printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
3864 "is not a supplemental.\n",
3865 p->p_pid, p->p_comm, iname);
3866
3867 error = EINVAL;
3868
3869 vnode_putname_printable(iname);
3870 vnode_unlock(orig_vp);
3871 goto out;
3872 }
3873
3874 for (orig_blob = orig_uip->cs_blobs; orig_blob != NULL;
3875 orig_blob = orig_blob->csb_next) {
3876 ptrauth_utils_auth_blob_generic(orig_blob->csb_cdhash,
3877 sizeof(orig_blob->csb_cdhash),
3878 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
3879 PTRAUTH_ADDR_DIVERSIFY,
3880 orig_blob->csb_cdhash_signature);
3881 if (orig_blob->csb_hashtype == blob->csb_linkage_hashtype &&
3882 memcmp(orig_blob->csb_cdhash, blob->csb_linkage, CS_CDHASH_LEN) == 0) {
3883 // Found match!
3884 break;
3885 }
3886 }
3887
3888 if (orig_blob == NULL) {
3889 // Not found.
3890
3891 proc_t p;
3892 const char *iname = vnode_getname_printable(vp);
3893 p = current_proc();
3894
3895 printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
3896 "does not match any attached cdhash.\n",
3897 p->p_pid, p->p_comm, iname);
3898
3899 error = ESRCH;
3900
3901 vnode_putname_printable(iname);
3902 vnode_unlock(orig_vp);
3903 goto out;
3904 }
3905
3906 vnode_unlock(orig_vp);
3907
3908 // validate the signature against policy!
3909#if CONFIG_MACF
3910 unsigned int signer_type = blob->csb_signer_type;
3911 error = mac_vnode_check_supplemental_signature(vp, blob, orig_vp, orig_blob, &signer_type);
3912 blob->csb_signer_type = signer_type;
3913
3914
3915 if (error) {
3916 if (cs_debug) {
3917 printf("check_supplemental_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
3918 }
3919 goto out;
3920 }
3921#endif
3922
3923 // We allowed the supplemental signature blob so
3924 // copy the platform bit or team-id from the linked signature and whether or not the original is developer code
3925 blob->csb_platform_binary = 0;
3926 blob->csb_platform_path = 0;
3927 if (orig_blob->csb_platform_binary == 1) {
3928 blob->csb_platform_binary = orig_blob->csb_platform_binary;
3929 blob->csb_platform_path = orig_blob->csb_platform_path;
3930 } else if (orig_blob->csb_teamid != NULL) {
3931 vm_size_t teamid_size = strlen(orig_blob->csb_teamid) + 1;
3932 blob->csb_supplement_teamid = kalloc(teamid_size);
3933 if (blob->csb_supplement_teamid == NULL) {
3934 error = ENOMEM;
3935 goto out;
3936 }
3937 strlcpy(blob->csb_supplement_teamid, orig_blob->csb_teamid, teamid_size);
3938 }
3939 blob->csb_flags = (orig_blob->csb_flags & CS_DEV_CODE);
3940
3941 // Validate the blob's coverage
3942 blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
3943 blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
3944
3945 if (blob_start_offset >= blob_end_offset || blob_start_offset < 0 || blob_end_offset <= 0) {
3946 /* reject empty or backwards blob */
3947 error = EINVAL;
3948 goto out;
3949 }
3950
3951 vnode_lock(vp);
3952 if (!UBCINFOEXISTS(vp)) {
3953 vnode_unlock(vp);
3954 error = ENOENT;
3955 goto out;
3956 }
3957 uip = vp->v_ubcinfo;
3958
3959 struct cs_blob *existing = uip->cs_blob_supplement;
3960 if (existing != NULL) {
3961 if (blob->csb_hashtype == existing->csb_hashtype &&
3962 memcmp(blob->csb_cdhash, existing->csb_cdhash, CS_CDHASH_LEN) == 0) {
3963 error = EAGAIN; // non-fatal
3964 } else {
3965 error = EALREADY; // fatal
3966 }
3967
3968 vnode_unlock(vp);
3969 goto out;
3970 }
3971
3972 /* Unlike regular cs_blobs, we only ever support one supplement. */
3973 blob->csb_next = NULL;
3974 uip->cs_blob_supplement = blob;
3975
3976 /* mark this vnode's VM object as having "signed pages" */
3977 kr = memory_object_signed(uip->ui_control, TRUE);
3978 if (kr != KERN_SUCCESS) {
3979 vnode_unlock(vp);
3980 error = ENOENT;
3981 goto out;
3982 }
3983
3984 vnode_unlock(vp);
3985
3986 /* We still adjust statistics even for supplemental blobs, as they
3987 * consume memory just the same. */
3988 ubc_cs_blob_adjust_statistics(blob);
3989
3990 if (cs_debug > 1) {
3991 proc_t p;
3992 const char *name = vnode_getname_printable(vp);
3993 p = current_proc();
3994 printf("CODE SIGNING: proc %d(%s) "
3995 "loaded supplemental signature for file (%s) "
3996 "range 0x%llx:0x%llx\n",
3997 p->p_pid, p->p_comm,
3998 name,
3999 blob->csb_base_offset + blob->csb_start_offset,
4000 blob->csb_base_offset + blob->csb_end_offset);
4001 vnode_putname_printable(name);
4002 }
4003
4004 if (ret_blob) {
4005 *ret_blob = blob;
4006 }
4007
4008 error = 0; // Success!
4009out:
4010 if (error) {
4011 if (cs_debug) {
4012 printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", current_proc()->p_pid, error);
4013 }
4014
4015 cs_blob_supplement_free(blob);
4016 }
4017
4018 if (error == EAGAIN) {
4019 /* We were asked to add an existing blob.
4020 * We cleaned up and ignore the attempt. */
4021 error = 0;
4022 }
4023
4024 return error;
4025}
4026#endif
4027
4028
4029
3e170ce0
A
4030void
4031csvnode_print_debug(struct vnode *vp)
4032{
0a7de745
A
4033 const char *name = NULL;
4034 struct ubc_info *uip;
3e170ce0
A
4035 struct cs_blob *blob;
4036
4037 name = vnode_getname_printable(vp);
4038 if (name) {
4039 printf("csvnode: name: %s\n", name);
4040 vnode_putname_printable(name);
4041 }
4042
4043 vnode_lock_spin(vp);
4044
0a7de745 4045 if (!UBCINFOEXISTS(vp)) {
3e170ce0
A
4046 blob = NULL;
4047 goto out;
4048 }
4049
4050 uip = vp->v_ubcinfo;
4051 for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
4052 printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
0a7de745
A
4053 (unsigned long)blob->csb_start_offset,
4054 (unsigned long)blob->csb_end_offset,
4055 blob->csb_flags,
4056 blob->csb_platform_binary ? "yes" : "no",
4057 blob->csb_platform_path ? "yes" : "no",
4058 blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
3e170ce0
A
4059 }
4060
4061out:
4062 vnode_unlock(vp);
3e170ce0
A
4063}
4064
f427ee49
A
4065#if CONFIG_SUPPLEMENTAL_SIGNATURES
4066struct cs_blob *
4067ubc_cs_blob_get_supplement(
4068 struct vnode *vp,
4069 off_t offset)
4070{
4071 struct cs_blob *blob;
4072 off_t offset_in_blob;
4073
4074 vnode_lock_spin(vp);
4075
4076 if (!UBCINFOEXISTS(vp)) {
4077 blob = NULL;
4078 goto out;
4079 }
4080
4081 blob = vp->v_ubcinfo->cs_blob_supplement;
4082
4083 if (blob == NULL) {
4084 // no supplemental blob
4085 goto out;
4086 }
4087
4088
4089 if (offset != -1) {
4090 offset_in_blob = offset - blob->csb_base_offset;
4091 if (offset_in_blob < blob->csb_start_offset || offset_in_blob >= blob->csb_end_offset) {
4092 // not actually covered by this blob
4093 blob = NULL;
4094 }
4095 }
4096
4097out:
4098 vnode_unlock(vp);
4099
4100 return blob;
4101}
4102#endif
4103
2d21ac55
A
4104struct cs_blob *
4105ubc_cs_blob_get(
0a7de745
A
4106 struct vnode *vp,
4107 cpu_type_t cputype,
f427ee49 4108 cpu_subtype_t cpusubtype,
0a7de745 4109 off_t offset)
91447636 4110{
0a7de745
A
4111 struct ubc_info *uip;
4112 struct cs_blob *blob;
2d21ac55
A
4113 off_t offset_in_blob;
4114
4115 vnode_lock_spin(vp);
4116
0a7de745 4117 if (!UBCINFOEXISTS(vp)) {
2d21ac55
A
4118 blob = NULL;
4119 goto out;
4120 }
4121
4122 uip = vp->v_ubcinfo;
4123 for (blob = uip->cs_blobs;
0a7de745
A
4124 blob != NULL;
4125 blob = blob->csb_next) {
f427ee49 4126 if (cputype != -1 && blob->csb_cpu_type == cputype && (cpusubtype == -1 || blob->csb_cpu_subtype == (cpusubtype & ~CPU_SUBTYPE_MASK))) {
2d21ac55
A
4127 break;
4128 }
4129 if (offset != -1) {
4130 offset_in_blob = offset - blob->csb_base_offset;
4131 if (offset_in_blob >= blob->csb_start_offset &&
4132 offset_in_blob < blob->csb_end_offset) {
4133 /* our offset is covered by this blob */
4134 break;
4135 }
4136 }
4137 }
4138
4139out:
4140 vnode_unlock(vp);
4141
4142 return blob;
91447636 4143}
2d21ac55
A
4144
4145static void
4146ubc_cs_free(
0a7de745 4147 struct ubc_info *uip)
91447636 4148{
0a7de745 4149 struct cs_blob *blob, *next_blob;
2d21ac55
A
4150
4151 for (blob = uip->cs_blobs;
0a7de745
A
4152 blob != NULL;
4153 blob = next_blob) {
2d21ac55 4154 next_blob = blob->csb_next;
f427ee49
A
4155 os_atomic_add(&cs_blob_count, -1, relaxed);
4156 os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
d9a64523 4157 cs_blob_free(blob);
2d21ac55 4158 }
6d2010ae
A
4159#if CHECK_CS_VALIDATION_BITMAP
4160 ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
4161#endif
2d21ac55 4162 uip->cs_blobs = NULL;
f427ee49
A
4163#if CONFIG_SUPPLEMENTAL_SIGNATURES
4164 if (uip->cs_blob_supplement != NULL) {
4165 blob = uip->cs_blob_supplement;
4166 os_atomic_add(&cs_blob_count, -1, relaxed);
4167 os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
4168 cs_blob_supplement_free(uip->cs_blob_supplement);
4169 uip->cs_blob_supplement = NULL;
4170 }
4171#endif
91447636 4172}
2d21ac55 4173
fe8ab488
A
4174/* check cs blob generation on vnode
4175 * returns:
4176 * 0 : Success, the cs_blob attached is current
4177 * ENEEDAUTH : Generation count mismatch. Needs authentication again.
4178 */
4179int
4180ubc_cs_generation_check(
0a7de745 4181 struct vnode *vp)
fe8ab488
A
4182{
4183 int retval = ENEEDAUTH;
4184
4185 vnode_lock_spin(vp);
4186
4187 if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
4188 retval = 0;
4189 }
4190
4191 vnode_unlock(vp);
4192 return retval;
4193}
4194
4195int
4196ubc_cs_blob_revalidate(
0a7de745 4197 struct vnode *vp,
c18c124e 4198 struct cs_blob *blob,
39037602 4199 struct image_params *imgp,
f427ee49
A
4200 int flags,
4201 uint32_t platform
fe8ab488
A
4202 )
4203{
4204 int error = 0;
fe8ab488 4205 const CS_CodeDirectory *cd = NULL;
39037602 4206 const CS_GenericBlob *entitlements = NULL;
813fb2f6 4207 size_t size;
fe8ab488
A
4208 assert(vp != NULL);
4209 assert(blob != NULL);
4210
813fb2f6
A
4211 size = blob->csb_mem_size;
4212 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
0a7de745 4213 size, &cd, &entitlements);
fe8ab488
A
4214 if (error) {
4215 if (cs_debug) {
4216 printf("CODESIGNING: csblob invalid: %d\n", error);
4217 }
4218 goto out;
4219 }
4220
0a7de745
A
4221 unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
4222 unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;
d9a64523
A
4223
4224 if (blob->csb_reconstituted) {
4225 /*
4226 * Code signatures that have been modified after validation
4227 * cannot be revalidated inline from their in-memory blob.
4228 *
4229 * That's okay, though, because the only path left that relies
4230 * on revalidation of existing in-memory blobs is the legacy
4231 * detached signature database path, which only exists on macOS,
4232 * which does not do reconstitution of any kind.
4233 */
4234 if (cs_debug) {
4235 printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
4236 }
4237
4238 /*
4239 * EAGAIN tells the caller that they may reread the code
4240 * signature and try attaching it again, which is the same
4241 * thing they would do if there was no cs_blob yet in the
4242 * first place.
4243 *
4244 * Conveniently, after ubc_cs_blob_add did a successful
4245 * validation, it will detect that a matching cs_blob (cdhash,
4246 * offset, arch etc.) already exists, and return success
4247 * without re-adding a cs_blob to the vnode.
4248 */
4249 return EAGAIN;
4250 }
4251
fe8ab488
A
4252 /* callout to mac_vnode_check_signature */
4253#if CONFIG_MACF
f427ee49 4254 error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
fe8ab488 4255 if (cs_debug && error) {
0a7de745 4256 printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
fe8ab488 4257 }
39037602
A
4258#else
4259 (void)flags;
5ba3f43e 4260 (void)signer_type;
fe8ab488
A
4261#endif
4262
4263 /* update generation number if success */
4264 vnode_lock_spin(vp);
0a7de745 4265 blob->csb_flags = cs_flags;
5ba3f43e 4266 blob->csb_signer_type = signer_type;
fe8ab488 4267 if (UBCINFOEXISTS(vp)) {
0a7de745 4268 if (error == 0) {
fe8ab488 4269 vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
0a7de745 4270 } else {
fe8ab488 4271 vp->v_ubcinfo->cs_add_gen = 0;
0a7de745 4272 }
fe8ab488
A
4273 }
4274
4275 vnode_unlock(vp);
4276
4277out:
4278 return error;
4279}
4280
4281void
4282cs_blob_reset_cache()
4283{
4284 /* incrementing odd no by 2 makes sure '0' is never reached. */
4285 OSAddAtomic(+2, &cs_blob_generation_count);
4286 printf("Reseting cs_blob cache from all vnodes. \n");
4287}
4288
2d21ac55
A
4289struct cs_blob *
4290ubc_get_cs_blobs(
0a7de745 4291 struct vnode *vp)
91447636 4292{
0a7de745
A
4293 struct ubc_info *uip;
4294 struct cs_blob *blobs;
2d21ac55 4295
b0d623f7
A
4296 /*
4297 * No need to take the vnode lock here. The caller must be holding
4298 * a reference on the vnode (via a VM mapping or open file descriptor),
4299 * so the vnode will not go away. The ubc_info stays until the vnode
4300 * goes away. And we only modify "blobs" by adding to the head of the
4301 * list.
4302 * The ubc_info could go away entirely if the vnode gets reclaimed as
4303 * part of a forced unmount. In the case of a code-signature validation
4304 * during a page fault, the "paging_in_progress" reference on the VM
4305 * object guarantess that the vnode pager (and the ubc_info) won't go
4306 * away during the fault.
4307 * Other callers need to protect against vnode reclaim by holding the
4308 * vnode lock, for example.
4309 */
2d21ac55 4310
0a7de745 4311 if (!UBCINFOEXISTS(vp)) {
2d21ac55
A
4312 blobs = NULL;
4313 goto out;
4314 }
4315
4316 uip = vp->v_ubcinfo;
4317 blobs = uip->cs_blobs;
4318
4319out:
2d21ac55 4320 return blobs;
91447636 4321}
2d21ac55 4322
f427ee49
A
4323#if CONFIG_SUPPLEMENTAL_SIGNATURES
4324struct cs_blob *
4325ubc_get_cs_supplement(
4326 struct vnode *vp)
4327{
4328 struct ubc_info *uip;
4329 struct cs_blob *blob;
4330
4331 /*
4332 * No need to take the vnode lock here. The caller must be holding
4333 * a reference on the vnode (via a VM mapping or open file descriptor),
4334 * so the vnode will not go away. The ubc_info stays until the vnode
4335 * goes away.
4336 * The ubc_info could go away entirely if the vnode gets reclaimed as
4337 * part of a forced unmount. In the case of a code-signature validation
4338 * during a page fault, the "paging_in_progress" reference on the VM
4339 * object guarantess that the vnode pager (and the ubc_info) won't go
4340 * away during the fault.
4341 * Other callers need to protect against vnode reclaim by holding the
4342 * vnode lock, for example.
4343 */
4344
4345 if (!UBCINFOEXISTS(vp)) {
4346 blob = NULL;
4347 goto out;
4348 }
4349
4350 uip = vp->v_ubcinfo;
4351 blob = uip->cs_blob_supplement;
4352
4353out:
4354 return blob;
4355}
4356#endif
4357
4358
15129b1c
A
4359void
4360ubc_get_cs_mtime(
0a7de745
A
4361 struct vnode *vp,
4362 struct timespec *cs_mtime)
15129b1c 4363{
0a7de745 4364 struct ubc_info *uip;
15129b1c 4365
0a7de745 4366 if (!UBCINFOEXISTS(vp)) {
15129b1c
A
4367 cs_mtime->tv_sec = 0;
4368 cs_mtime->tv_nsec = 0;
4369 return;
4370 }
4371
4372 uip = vp->v_ubcinfo;
4373 cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
4374 cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
4375}
4376
2d21ac55
A
4377unsigned long cs_validate_page_no_hash = 0;
4378unsigned long cs_validate_page_bad_hash = 0;
39037602
A
4379static boolean_t
4380cs_validate_hash(
0a7de745
A
4381 struct cs_blob *blobs,
4382 memory_object_t pager,
4383 memory_object_offset_t page_offset,
4384 const void *data,
4385 vm_size_t *bytes_processed,
4386 unsigned *tainted)
91447636 4387{
0a7de745
A
4388 union cs_hash_union mdctx;
4389 struct cs_hash const *hashtype = NULL;
4390 unsigned char actual_hash[CS_HASH_MAX_SIZE];
4391 unsigned char expected_hash[CS_HASH_MAX_SIZE];
4392 boolean_t found_hash;
4393 struct cs_blob *blob;
4394 const CS_CodeDirectory *cd;
4395 const unsigned char *hash;
4396 boolean_t validated;
4397 off_t offset; /* page offset in the file */
4398 size_t size;
4399 off_t codeLimit = 0;
4400 const char *lower_bound, *upper_bound;
4401 vm_offset_t kaddr, blob_addr;
2d21ac55
A
4402
4403 /* retrieve the expected hash */
4404 found_hash = FALSE;
2d21ac55
A
4405
4406 for (blob = blobs;
0a7de745
A
4407 blob != NULL;
4408 blob = blob->csb_next) {
2d21ac55
A
4409 offset = page_offset - blob->csb_base_offset;
4410 if (offset < blob->csb_start_offset ||
4411 offset >= blob->csb_end_offset) {
4412 /* our page is not covered by this blob */
4413 continue;
4414 }
4415
39037602 4416 /* blob data has been released */
c3c9b80d 4417 kaddr = (vm_offset_t)blob->csb_mem_kaddr;
2d21ac55 4418 if (kaddr == 0) {
39037602 4419 continue;
2d21ac55 4420 }
39236c6e 4421
2d21ac55 4422 blob_addr = kaddr + blob->csb_mem_offset;
2d21ac55
A
4423 lower_bound = CAST_DOWN(char *, blob_addr);
4424 upper_bound = lower_bound + blob->csb_mem_size;
0a7de745 4425
490019cf 4426 cd = blob->csb_cd;
2d21ac55 4427 if (cd != NULL) {
3e170ce0 4428 /* all CD's that have been injected is already validated */
b0d623f7 4429
3e170ce0 4430 hashtype = blob->csb_hashtype;
0a7de745 4431 if (hashtype == NULL) {
3e170ce0 4432 panic("unknown hash type ?");
0a7de745
A
4433 }
4434 if (hashtype->cs_digest_size > sizeof(actual_hash)) {
3e170ce0 4435 panic("hash size too large");
0a7de745 4436 }
f427ee49 4437 if (offset & ((1U << blob->csb_hash_pageshift) - 1)) {
39037602 4438 panic("offset not aligned to cshash boundary");
0a7de745 4439 }
3e170ce0 4440
2d21ac55 4441 codeLimit = ntohl(cd->codeLimit);
39236c6e 4442
0a7de745
A
4443 hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
4444 hashtype->cs_size,
4445 lower_bound, upper_bound);
cf7d32b8 4446 if (hash != NULL) {
490019cf 4447 bcopy(hash, expected_hash, hashtype->cs_size);
cf7d32b8
A
4448 found_hash = TRUE;
4449 }
2d21ac55 4450
2d21ac55
A
4451 break;
4452 }
4453 }
4454
4455 if (found_hash == FALSE) {
4456 /*
4457 * We can't verify this page because there is no signature
4458 * for it (yet). It's possible that this part of the object
4459 * is not signed, or that signatures for that part have not
4460 * been loaded yet.
4461 * Report that the page has not been validated and let the
4462 * caller decide if it wants to accept it or not.
4463 */
4464 cs_validate_page_no_hash++;
4465 if (cs_debug > 1) {
4466 printf("CODE SIGNING: cs_validate_page: "
0a7de745
A
4467 "mobj %p off 0x%llx: no hash to validate !?\n",
4468 pager, page_offset);
2d21ac55
A
4469 }
4470 validated = FALSE;
c18c124e 4471 *tainted = 0;
2d21ac55 4472 } else {
c18c124e
A
4473 *tainted = 0;
4474
f427ee49 4475 size = (1U << blob->csb_hash_pageshift);
39037602
A
4476 *bytes_processed = size;
4477
fe8ab488 4478 const uint32_t *asha1, *esha1;
b0d623f7 4479 if ((off_t)(offset + size) > codeLimit) {
2d21ac55
A
4480 /* partial page at end of segment */
4481 assert(offset < codeLimit);
f427ee49 4482 size = (size_t) (codeLimit & (size - 1));
c18c124e 4483 *tainted |= CS_VALIDATE_NX;
2d21ac55 4484 }
3e170ce0
A
4485
4486 hashtype->cs_init(&mdctx);
39037602 4487
f427ee49 4488 if (blob->csb_hash_firstlevel_pageshift) {
39037602
A
4489 const unsigned char *partial_data = (const unsigned char *)data;
4490 size_t i;
0a7de745
A
4491 for (i = 0; i < size;) {
4492 union cs_hash_union partialctx;
39037602 4493 unsigned char partial_digest[CS_HASH_MAX_SIZE];
f427ee49 4494 size_t partial_size = MIN(size - i, (1U << blob->csb_hash_firstlevel_pageshift));
39037602
A
4495
4496 hashtype->cs_init(&partialctx);
4497 hashtype->cs_update(&partialctx, partial_data, partial_size);
4498 hashtype->cs_final(partial_digest, &partialctx);
4499
4500 /* Update cumulative multi-level hash */
4501 hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
4502 partial_data = partial_data + partial_size;
4503 i += partial_size;
4504 }
4505 } else {
4506 hashtype->cs_update(&mdctx, data, size);
4507 }
3e170ce0 4508 hashtype->cs_final(actual_hash, &mdctx);
2d21ac55 4509
fe8ab488
A
4510 asha1 = (const uint32_t *) actual_hash;
4511 esha1 = (const uint32_t *) expected_hash;
4512
490019cf 4513 if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
2d21ac55
A
4514 if (cs_debug) {
4515 printf("CODE SIGNING: cs_validate_page: "
0a7de745
A
4516 "mobj %p off 0x%llx size 0x%lx: "
4517 "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
4518 "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
4519 pager, page_offset, size,
4520 asha1[0], asha1[1], asha1[2],
4521 asha1[3], asha1[4],
4522 esha1[0], esha1[1], esha1[2],
4523 esha1[3], esha1[4]);
2d21ac55
A
4524 }
4525 cs_validate_page_bad_hash++;
c18c124e 4526 *tainted |= CS_VALIDATE_TAINTED;
2d21ac55 4527 } else {
39236c6e 4528 if (cs_debug > 10) {
2d21ac55 4529 printf("CODE SIGNING: cs_validate_page: "
0a7de745
A
4530 "mobj %p off 0x%llx size 0x%lx: "
4531 "SHA1 OK\n",
4532 pager, page_offset, size);
2d21ac55 4533 }
2d21ac55
A
4534 }
4535 validated = TRUE;
4536 }
0a7de745 4537
2d21ac55 4538 return validated;
91447636
A
4539}
4540
39037602
A
4541boolean_t
4542cs_validate_range(
0a7de745
A
4543 struct vnode *vp,
4544 memory_object_t pager,
4545 memory_object_offset_t page_offset,
4546 const void *data,
4547 vm_size_t dsize,
4548 unsigned *tainted)
39037602
A
4549{
4550 vm_size_t offset_in_range;
4551 boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */
4552
4553 struct cs_blob *blobs = ubc_get_cs_blobs(vp);
4554
f427ee49
A
4555#if CONFIG_SUPPLEMENTAL_SIGNATURES
4556 if (blobs == NULL && proc_is_translated(current_proc())) {
4557 struct cs_blob *supp = ubc_get_cs_supplement(vp);
4558
4559 if (supp != NULL) {
4560 blobs = supp;
4561 } else {
4562 return FALSE;
4563 }
4564 }
4565#endif
4566
4567
4568
39037602
A
4569 *tainted = 0;
4570
4571 for (offset_in_range = 0;
0a7de745
A
4572 offset_in_range < dsize;
4573 /* offset_in_range updated based on bytes processed */) {
39037602
A
4574 unsigned subrange_tainted = 0;
4575 boolean_t subrange_validated;
4576 vm_size_t bytes_processed = 0;
4577
4578 subrange_validated = cs_validate_hash(blobs,
0a7de745
A
4579 pager,
4580 page_offset + offset_in_range,
4581 (const void *)((const char *)data + offset_in_range),
4582 &bytes_processed,
4583 &subrange_tainted);
39037602
A
4584
4585 *tainted |= subrange_tainted;
4586
4587 if (bytes_processed == 0) {
4588 /* Cannote make forward progress, so return an error */
4589 all_subranges_validated = FALSE;
4590 break;
4591 } else if (subrange_validated == FALSE) {
4592 all_subranges_validated = FALSE;
4593 /* Keep going to detect other types of failures in subranges */
4594 }
4595
4596 offset_in_range += bytes_processed;
4597 }
4598
4599 return all_subranges_validated;
4600}
4601
f427ee49
A
4602void
4603cs_validate_page(
4604 struct vnode *vp,
4605 memory_object_t pager,
4606 memory_object_offset_t page_offset,
4607 const void *data,
4608 int *validated_p,
4609 int *tainted_p,
4610 int *nx_p)
4611{
4612 vm_size_t offset_in_page;
4613 struct cs_blob *blobs;
4614
4615 blobs = ubc_get_cs_blobs(vp);
4616
4617#if CONFIG_SUPPLEMENTAL_SIGNATURES
4618 if (blobs == NULL && proc_is_translated(current_proc())) {
4619 struct cs_blob *supp = ubc_get_cs_supplement(vp);
4620
4621 if (supp != NULL) {
4622 blobs = supp;
4623 }
4624 }
4625#endif
4626
4627 *validated_p = VMP_CS_ALL_FALSE;
4628 *tainted_p = VMP_CS_ALL_FALSE;
4629 *nx_p = VMP_CS_ALL_FALSE;
4630
4631 for (offset_in_page = 0;
4632 offset_in_page < PAGE_SIZE;
4633 /* offset_in_page updated based on bytes processed */) {
4634 unsigned subrange_tainted = 0;
4635 boolean_t subrange_validated;
4636 vm_size_t bytes_processed = 0;
4637 int sub_bit;
4638
4639 subrange_validated = cs_validate_hash(blobs,
4640 pager,
4641 page_offset + offset_in_page,
4642 (const void *)((const char *)data + offset_in_page),
4643 &bytes_processed,
4644 &subrange_tainted);
4645
4646 if (bytes_processed == 0) {
4647 /* 4k chunk not code-signed: try next one */
4648 offset_in_page += FOURK_PAGE_SIZE;
4649 continue;
4650 }
4651 if (offset_in_page == 0 &&
4652 bytes_processed > PAGE_SIZE - FOURK_PAGE_SIZE) {
4653 /* all processed: no 4k granularity */
4654 if (subrange_validated) {
4655 *validated_p = VMP_CS_ALL_TRUE;
4656 }
4657 if (subrange_tainted & CS_VALIDATE_TAINTED) {
4658 *tainted_p = VMP_CS_ALL_TRUE;
4659 }
4660 if (subrange_tainted & CS_VALIDATE_NX) {
4661 *nx_p = VMP_CS_ALL_TRUE;
4662 }
4663 break;
4664 }
4665 /* we only handle 4k or 16k code-signing granularity... */
4666 assertf(bytes_processed <= FOURK_PAGE_SIZE,
4667 "vp %p blobs %p offset 0x%llx + 0x%llx bytes_processed 0x%llx\n",
4668 vp, blobs, (uint64_t)page_offset,
4669 (uint64_t)offset_in_page, (uint64_t)bytes_processed);
4670 sub_bit = 1 << (offset_in_page >> FOURK_PAGE_SHIFT);
4671 if (subrange_validated) {
4672 *validated_p |= sub_bit;
4673 }
4674 if (subrange_tainted & CS_VALIDATE_TAINTED) {
4675 *tainted_p |= sub_bit;
4676 }
4677 if (subrange_tainted & CS_VALIDATE_NX) {
4678 *nx_p |= sub_bit;
4679 }
4680 /* go to next 4k chunk */
4681 offset_in_page += FOURK_PAGE_SIZE;
4682 }
4683
4684 return;
4685}
4686
2d21ac55
A
4687int
4688ubc_cs_getcdhash(
0a7de745
A
4689 vnode_t vp,
4690 off_t offset,
4691 unsigned char *cdhash)
2d21ac55 4692{
0a7de745
A
4693 struct cs_blob *blobs, *blob;
4694 off_t rel_offset;
4695 int ret;
b0d623f7
A
4696
4697 vnode_lock(vp);
2d21ac55
A
4698
4699 blobs = ubc_get_cs_blobs(vp);
4700 for (blob = blobs;
0a7de745
A
4701 blob != NULL;
4702 blob = blob->csb_next) {
2d21ac55
A
4703 /* compute offset relative to this blob */
4704 rel_offset = offset - blob->csb_base_offset;
4705 if (rel_offset >= blob->csb_start_offset &&
4706 rel_offset < blob->csb_end_offset) {
4707 /* this blob does cover our "offset" ! */
4708 break;
4709 }
4710 }
4711
4712 if (blob == NULL) {
4713 /* we didn't find a blob covering "offset" */
b0d623f7
A
4714 ret = EBADEXEC; /* XXX any better error ? */
4715 } else {
4716 /* get the SHA1 hash of that blob */
f427ee49
A
4717 ptrauth_utils_auth_blob_generic(blob->csb_cdhash,
4718 sizeof(blob->csb_cdhash),
4719 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
4720 PTRAUTH_ADDR_DIVERSIFY,
4721 blob->csb_cdhash_signature);
0a7de745 4722 bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
b0d623f7 4723 ret = 0;
2d21ac55
A
4724 }
4725
b0d623f7 4726 vnode_unlock(vp);
2d21ac55 4727
b0d623f7 4728 return ret;
2d21ac55 4729}
6d2010ae 4730
39037602
A
4731boolean_t
4732ubc_cs_is_range_codesigned(
0a7de745
A
4733 vnode_t vp,
4734 mach_vm_offset_t start,
4735 mach_vm_size_t size)
39037602 4736{
0a7de745
A
4737 struct cs_blob *csblob;
4738 mach_vm_offset_t blob_start;
4739 mach_vm_offset_t blob_end;
39037602
A
4740
4741 if (vp == NULL) {
4742 /* no file: no code signature */
4743 return FALSE;
4744 }
4745 if (size == 0) {
4746 /* no range: no code signature */
4747 return FALSE;
4748 }
4749 if (start + size < start) {
4750 /* overflow */
4751 return FALSE;
4752 }
4753
f427ee49 4754 csblob = ubc_cs_blob_get(vp, -1, -1, start);
39037602
A
4755 if (csblob == NULL) {
4756 return FALSE;
4757 }
4758
4759 /*
4760 * We currently check if the range is covered by a single blob,
4761 * which should always be the case for the dyld shared cache.
4762 * If we ever want to make this routine handle other cases, we
4763 * would have to iterate if the blob does not cover the full range.
4764 */
4765 blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
0a7de745 4766 csblob->csb_start_offset);
39037602 4767 blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
0a7de745 4768 csblob->csb_end_offset);
39037602
A
4769 if (blob_start > start || blob_end < (start + size)) {
4770 /* range not fully covered by this code-signing blob */
4771 return FALSE;
4772 }
4773
4774 return TRUE;
4775}
4776
6d2010ae 4777#if CHECK_CS_VALIDATION_BITMAP
0a7de745
A
4778#define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
4779extern boolean_t root_fs_upgrade_try;
6d2010ae
A
4780
4781/*
4782 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
4783 * Depends:
4784 * a) Is the target vnode on the root filesystem?
4785 * b) Has someone tried to mount the root filesystem read-write?
4786 * If answers are (a) yes AND (b) no, then we can use the bitmap.
4787 */
0a7de745 4788#define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
6d2010ae
A
4789kern_return_t
4790ubc_cs_validation_bitmap_allocate(
0a7de745 4791 vnode_t vp)
6d2010ae 4792{
0a7de745 4793 kern_return_t kr = KERN_SUCCESS;
6d2010ae 4794 struct ubc_info *uip;
0a7de745
A
4795 char *target_bitmap;
4796 vm_object_size_t bitmap_size;
6d2010ae 4797
0a7de745 4798 if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
6d2010ae
A
4799 kr = KERN_INVALID_ARGUMENT;
4800 } else {
4801 uip = vp->v_ubcinfo;
4802
0a7de745 4803 if (uip->cs_valid_bitmap == NULL) {
6d2010ae 4804 bitmap_size = stob(uip->ui_size);
0a7de745 4805 target_bitmap = (char*) kalloc((vm_size_t)bitmap_size );
6d2010ae
A
4806 if (target_bitmap == 0) {
4807 kr = KERN_NO_SPACE;
4808 } else {
4809 kr = KERN_SUCCESS;
4810 }
0a7de745 4811 if (kr == KERN_SUCCESS) {
6d2010ae
A
4812 memset( target_bitmap, 0, (size_t)bitmap_size);
4813 uip->cs_valid_bitmap = (void*)target_bitmap;
4814 uip->cs_valid_bitmap_size = bitmap_size;
4815 }
4816 }
4817 }
4818 return kr;
4819}
4820
4821kern_return_t
0a7de745
A
4822ubc_cs_check_validation_bitmap(
4823 vnode_t vp,
4824 memory_object_offset_t offset,
4825 int optype)
6d2010ae 4826{
0a7de745 4827 kern_return_t kr = KERN_SUCCESS;
6d2010ae 4828
0a7de745 4829 if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
6d2010ae
A
4830 kr = KERN_INVALID_ARGUMENT;
4831 } else {
4832 struct ubc_info *uip = vp->v_ubcinfo;
0a7de745 4833 char *target_bitmap = uip->cs_valid_bitmap;
6d2010ae 4834
0a7de745
A
4835 if (target_bitmap == NULL) {
4836 kr = KERN_INVALID_ARGUMENT;
6d2010ae 4837 } else {
0a7de745 4838 uint64_t bit, byte;
6d2010ae
A
4839 bit = atop_64( offset );
4840 byte = bit >> 3;
4841
0a7de745
A
4842 if (byte > uip->cs_valid_bitmap_size) {
4843 kr = KERN_INVALID_ARGUMENT;
6d2010ae 4844 } else {
6d2010ae
A
4845 if (optype == CS_BITMAP_SET) {
4846 target_bitmap[byte] |= (1 << (bit & 07));
4847 kr = KERN_SUCCESS;
4848 } else if (optype == CS_BITMAP_CLEAR) {
4849 target_bitmap[byte] &= ~(1 << (bit & 07));
4850 kr = KERN_SUCCESS;
4851 } else if (optype == CS_BITMAP_CHECK) {
0a7de745 4852 if (target_bitmap[byte] & (1 << (bit & 07))) {
6d2010ae
A
4853 kr = KERN_SUCCESS;
4854 } else {
4855 kr = KERN_FAILURE;
4856 }
4857 }
4858 }
4859 }
4860 }
4861 return kr;
4862}
4863
4864void
4865ubc_cs_validation_bitmap_deallocate(
0a7de745 4866 vnode_t vp)
6d2010ae
A
4867{
4868 struct ubc_info *uip;
0a7de745
A
4869 void *target_bitmap;
4870 vm_object_size_t bitmap_size;
6d2010ae 4871
0a7de745 4872 if (UBCINFOEXISTS(vp)) {
6d2010ae
A
4873 uip = vp->v_ubcinfo;
4874
0a7de745 4875 if ((target_bitmap = uip->cs_valid_bitmap) != NULL) {
6d2010ae
A
4876 bitmap_size = uip->cs_valid_bitmap_size;
4877 kfree( target_bitmap, (vm_size_t) bitmap_size );
4878 uip->cs_valid_bitmap = NULL;
4879 }
4880 }
4881}
4882#else
0a7de745
A
4883kern_return_t
4884ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
4885{
6d2010ae
A
4886 return KERN_INVALID_ARGUMENT;
4887}
4888
0a7de745
A
4889kern_return_t
4890ubc_cs_check_validation_bitmap(
4891 __unused struct vnode *vp,
6d2010ae 4892 __unused memory_object_offset_t offset,
0a7de745
A
4893 __unused int optype)
4894{
6d2010ae
A
4895 return KERN_INVALID_ARGUMENT;
4896}
4897
0a7de745
A
4898void
4899ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp)
4900{
6d2010ae
A
4901 return;
4902}
4903#endif /* CHECK_CS_VALIDATION_BITMAP */
d9a64523 4904