[apple/xnu.git] / bsd / kern / ubc_subr.c (xnu-6153.81.5)
1c79356b 1/*
fe8ab488 2 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
0a7de745 28/*
29 * File: ubc_subr.c
30 * Author: Umesh Vaishampayan [umeshv@apple.com]
31 * 05-Aug-1999 umeshv Created.
32 *
33 * Functions related to Unified Buffer cache.
34 *
35 * Caller of UBC functions MUST have a valid reference on the vnode.
36 *
0a7de745 37 */
1c79356b 38
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/lock.h>
43#include <sys/mman.h>
44#include <sys/mount_internal.h>
45#include <sys/vnode_internal.h>
46#include <sys/ubc_internal.h>
1c79356b 47#include <sys/ucred.h>
48#include <sys/proc_internal.h>
49#include <sys/kauth.h>
1c79356b 50#include <sys/buf.h>
13fec989 51#include <sys/user.h>
2d21ac55 52#include <sys/codesign.h>
53#include <sys/codedir_internal.h>
54#include <sys/fsevents.h>
c18c124e 55#include <sys/fcntl.h>
56
57#include <mach/mach_types.h>
58#include <mach/memory_object_types.h>
59#include <mach/memory_object_control.h>
60#include <mach/vm_map.h>
b0d623f7 61#include <mach/mach_vm.h>
91447636 62#include <mach/upl.h>
1c79356b 63
91447636 64#include <kern/kern_types.h>
2d21ac55 65#include <kern/kalloc.h>
1c79356b 66#include <kern/zalloc.h>
13fec989 67#include <kern/thread.h>
5ba3f43e 68#include <vm/pmap.h>
69#include <vm/vm_kern.h>
70#include <vm/vm_protos.h> /* last */
1c79356b 71
2d21ac55 72#include <libkern/crypto/sha1.h>
3e170ce0 73#include <libkern/crypto/sha2.h>
74#include <libkern/libkern.h>
75
593a1d5f 76#include <security/mac_framework.h>
fe8ab488 77#include <stdbool.h>
593a1d5f 78
79/* XXX These should be in a BSD accessible Mach header, but aren't. */
80extern kern_return_t memory_object_pages_resident(memory_object_control_t,
81 boolean_t *);
82extern kern_return_t memory_object_signed(memory_object_control_t control,
83 boolean_t is_signed);
84extern boolean_t memory_object_is_signed(memory_object_control_t);
85extern void memory_object_mark_trusted(
86 memory_object_control_t control);
6d2010ae 87
88/* XXX Same for those. */
89
90extern void Debugger(const char *message);
91
92
93/* XXX no one uses this interface! */
94kern_return_t ubc_page_op_with_control(
95 memory_object_control_t control,
96 off_t f_offset,
97 int ops,
98 ppnum_t *phys_entryp,
99 int *flagsp);
100
101
102#if DIAGNOSTIC
103#if defined(assert)
b0d623f7 104#undef assert
105#endif
106#define assert(cond) \
2d21ac55 107 ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
108#else
109#include <kern/assert.h>
110#endif /* DIAGNOSTIC */
111
2d21ac55 112static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
0c530ab8 113static int ubc_umcallback(vnode_t, void *);
0c530ab8 114static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
2d21ac55 115static void ubc_cs_free(struct ubc_info *uip);
b4c24cb9 116
39037602 117static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
d9a64523 118static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
39037602 119
120struct zone *ubc_info_zone;
121static uint32_t cs_blob_generation_count = 1;
122
123/*
124 * CODESIGNING
125 * Routines to navigate code signing data structures in the kernel...
126 */
127
128extern int cs_debug;
129
0a7de745 130#define PAGE_SHIFT_4K (12)
fe8ab488 131
132static boolean_t
133cs_valid_range(
134 const void *start,
135 const void *end,
136 const void *lower_bound,
137 const void *upper_bound)
138{
139 if (upper_bound < lower_bound ||
140 end < start) {
141 return FALSE;
142 }
143
144 if (start < lower_bound ||
145 end > upper_bound) {
146 return FALSE;
147 }
148
149 return TRUE;
150}
151
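/*
 * Illustrative sketch (not part of the original file, hence compiled
 * out): cs_valid_range() is the guard used before dereferencing any
 * structure inside an untrusted signature blob. The names `sb',
 * `blob_start' and `blob_end' are hypothetical.
 */
#if 0
	const CS_SuperBlob *sb = (const CS_SuperBlob *)blob_start;
	if (!cs_valid_range(sb, sb + 1, blob_start, blob_end)) {
		return EBADEXEC; /* header does not fit inside the blob */
	}
#endif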
152typedef void (*cs_md_init)(void *ctx);
153typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
154typedef void (*cs_md_final)(void *hash, void *ctx);
155
156struct cs_hash {
157 uint8_t cs_type; /* type code as per code signing */
158 size_t cs_size; /* size of effective hash (may be truncated) */
159 size_t cs_digest_size;/* size of native hash */
160 cs_md_init cs_init;
161 cs_md_update cs_update;
162 cs_md_final cs_final;
163};
164
165uint8_t
166cs_hash_type(
167 struct cs_hash const * const cs_hash)
5ba3f43e 168{
0a7de745 169 return cs_hash->cs_type;
170}
171
d190cdc3 172static const struct cs_hash cs_hash_sha1 = {
173 .cs_type = CS_HASHTYPE_SHA1,
174 .cs_size = CS_SHA1_LEN,
175 .cs_digest_size = SHA_DIGEST_LENGTH,
176 .cs_init = (cs_md_init)SHA1Init,
177 .cs_update = (cs_md_update)SHA1Update,
178 .cs_final = (cs_md_final)SHA1Final,
179};
180#if CRYPTO_SHA2
d190cdc3 181static const struct cs_hash cs_hash_sha256 = {
182 .cs_type = CS_HASHTYPE_SHA256,
183 .cs_size = SHA256_DIGEST_LENGTH,
184 .cs_digest_size = SHA256_DIGEST_LENGTH,
185 .cs_init = (cs_md_init)SHA256_Init,
186 .cs_update = (cs_md_update)SHA256_Update,
187 .cs_final = (cs_md_final)SHA256_Final,
3e170ce0 188};
d190cdc3 189static const struct cs_hash cs_hash_sha256_truncate = {
190 .cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
191 .cs_size = CS_SHA256_TRUNCATED_LEN,
192 .cs_digest_size = SHA256_DIGEST_LENGTH,
193 .cs_init = (cs_md_init)SHA256_Init,
194 .cs_update = (cs_md_update)SHA256_Update,
195 .cs_final = (cs_md_final)SHA256_Final,
3e170ce0 196};
d190cdc3 197static const struct cs_hash cs_hash_sha384 = {
198 .cs_type = CS_HASHTYPE_SHA384,
199 .cs_size = SHA384_DIGEST_LENGTH,
200 .cs_digest_size = SHA384_DIGEST_LENGTH,
201 .cs_init = (cs_md_init)SHA384_Init,
202 .cs_update = (cs_md_update)SHA384_Update,
203 .cs_final = (cs_md_final)SHA384_Final,
490019cf 204};
3e170ce0 205#endif
39037602 206
d190cdc3 207static struct cs_hash const *
208cs_find_md(uint8_t type)
209{
210 if (type == CS_HASHTYPE_SHA1) {
211 return &cs_hash_sha1;
212#if CRYPTO_SHA2
213 } else if (type == CS_HASHTYPE_SHA256) {
214 return &cs_hash_sha256;
215 } else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
216 return &cs_hash_sha256_truncate;
217 } else if (type == CS_HASHTYPE_SHA384) {
218 return &cs_hash_sha384;
219#endif
220 }
221 return NULL;
222}
223
224union cs_hash_union {
225 SHA1_CTX sha1ctxt;
226 SHA256_CTX sha256ctx;
227 SHA384_CTX sha384ctx;
228};
229
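/*
 * Illustrative sketch (compiled out, not in the original source): the
 * cs_hash function-pointer table and cs_hash_union context are used
 * together to digest a buffer. example_cs_digest() is a hypothetical
 * helper name.
 */
#if 0
static int
example_cs_digest(uint8_t type, const void *data, size_t size,
    uint8_t digest[CS_HASH_MAX_SIZE])
{
	struct cs_hash const *hashtype = cs_find_md(type);
	union cs_hash_union ctx;

	if (hashtype == NULL || hashtype->cs_digest_size > CS_HASH_MAX_SIZE) {
		return EBADEXEC; /* unknown or oversized hash type */
	}
	hashtype->cs_init(&ctx);
	hashtype->cs_update(&ctx, data, size);
	hashtype->cs_final(digest, &ctx);
	return 0;
}
#endif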
230
2d21ac55 231/*
232 * Choose among different hash algorithms.
233 * Higher is better, 0 => don't use at all.
2d21ac55 234 */
d190cdc3 235static const uint32_t hashPriorities[] = {
236 CS_HASHTYPE_SHA1,
237 CS_HASHTYPE_SHA256_TRUNCATED,
238 CS_HASHTYPE_SHA256,
239 CS_HASHTYPE_SHA384,
240};
b0d623f7 241
242static unsigned int
243hash_rank(const CS_CodeDirectory *cd)
244{
245 uint32_t type = cd->hashType;
246 unsigned int n;
2d21ac55 247
248 for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
249 if (hashPriorities[n] == type) {
490019cf 250 return n + 1;
251 }
252 }
253 return 0; /* not supported */
254}
255
256
257/*
258 * Locating a page hash
259 */
260static const unsigned char *
261hashes(
262 const CS_CodeDirectory *cd,
263 uint32_t page,
264 size_t hash_len,
265 const char *lower_bound,
266 const char *upper_bound)
267{
268 const unsigned char *base, *top, *hash;
b0d623f7 269 uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
270
271 assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
272
0a7de745 273 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
b0d623f7 274 /* Get first scatter struct */
39236c6e 275 const SC_Scatter *scatter = (const SC_Scatter*)
276 ((const char*)cd + ntohl(cd->scatterOffset));
277 uint32_t hashindex = 0, scount, sbase = 0;
278 /* iterate all scatter structs */
279 do {
280 if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
281 if (cs_debug) {
282 printf("CODE SIGNING: Scatter extends past Code Directory\n");
283 }
284 return NULL;
285 }
0a7de745 286
287 scount = ntohl(scatter->count);
288 uint32_t new_base = ntohl(scatter->base);
289
290 /* last scatter? */
291 if (scount == 0) {
292 return NULL;
293 }
294
295 if ((hashindex > 0) && (new_base <= sbase)) {
296 if (cs_debug) {
b0d623f7 297 printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
0a7de745 298 sbase, new_base);
b0d623f7 299 }
0a7de745 300 return NULL; /* unordered scatter array */
301 }
302 sbase = new_base;
303
304 /* this scatter beyond page we're looking for? */
305 if (sbase > page) {
306 return NULL;
307 }
308
309 if (sbase + scount >= page) {
310 /* Found the scatter struct that is
311 * referencing our page */
312
313 /* base = address of first hash covered by scatter */
314 base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
315 hashindex * hash_len;
b0d623f7 316 /* top = address of first hash after this scatter */
3e170ce0 317 top = base + scount * hash_len;
318 if (!cs_valid_range(base, top, lower_bound,
319 upper_bound) ||
320 hashindex > nCodeSlots) {
321 return NULL;
322 }
0a7de745 323
324 break;
325 }
326
327 /* this scatter struct is before the page we're looking
b0d623f7 328 * for. Iterate. */
0a7de745 329 hashindex += scount;
b0d623f7 330 scatter++;
331 } while (1);
332
3e170ce0 333 hash = base + (page - sbase) * hash_len;
334 } else {
335 base = (const unsigned char *)cd + ntohl(cd->hashOffset);
3e170ce0 336 top = base + nCodeSlots * hash_len;
337 if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
338 page > nCodeSlots) {
339 return NULL;
340 }
341 assert(page < nCodeSlots);
2d21ac55 342
3e170ce0 343 hash = base + page * hash_len;
b0d623f7 344 }
0a7de745 345
3e170ce0 346 if (!cs_valid_range(hash, hash + hash_len,
0a7de745 347 lower_bound, upper_bound)) {
348 hash = NULL;
349 }
350
351 return hash;
352}
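/*
 * Illustrative sketch (compiled out, not in the original source): a
 * caller looks up the stored hash for a page via hashes() and compares
 * it against a freshly computed digest. example_page_hash_ok() and its
 * parameter names are hypothetical.
 */
#if 0
static boolean_t
example_page_hash_ok(const CS_CodeDirectory *cd, uint32_t pageno,
    struct cs_hash const *hashtype, const void *page_data, size_t page_size,
    const char *lower_bound, const char *upper_bound)
{
	union cs_hash_union ctx;
	unsigned char actual[CS_HASH_MAX_SIZE];
	const unsigned char *expected;

	expected = hashes(cd, pageno, hashtype->cs_size, lower_bound, upper_bound);
	if (expected == NULL) {
		return FALSE; /* no (valid) stored hash for this page */
	}
	hashtype->cs_init(&ctx);
	hashtype->cs_update(&ctx, page_data, page_size);
	hashtype->cs_final(actual, &ctx);
	return memcmp(expected, actual, hashtype->cs_size) == 0 ? TRUE : FALSE;
}
#endif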
353
354/*
355 * cs_validate_codedirectory
356 *
357 * Validate the pointers inside the code directory to make sure that
358 * all offsets and lengths are constrained within the buffer.
359 *
360 * Parameters: cd Pointer to code directory buffer
361 * length Length of buffer
362 *
363 * Returns: 0 Success
364 * EBADEXEC Invalid code signature
365 */
366
367static int
368cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
369{
d190cdc3 370 struct cs_hash const *hashtype;
39236c6e 371
0a7de745 372 if (length < sizeof(*cd)) {
39236c6e 373 return EBADEXEC;
374 }
375 if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
39236c6e 376 return EBADEXEC;
377 }
378 if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT) {
39236c6e 379 return EBADEXEC;
0a7de745 380 }
3e170ce0 381 hashtype = cs_find_md(cd->hashType);
0a7de745 382 if (hashtype == NULL) {
39236c6e 383 return EBADEXEC;
0a7de745 384 }
39236c6e 385
0a7de745 386 if (cd->hashSize != hashtype->cs_size) {
3e170ce0 387 return EBADEXEC;
0a7de745 388 }
3e170ce0 389
0a7de745 390 if (length < ntohl(cd->hashOffset)) {
39236c6e 391 return EBADEXEC;
0a7de745 392 }
393
394 /* check that nSpecialSlots fits in the buffer in front of hashOffset */
0a7de745 395 if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
39236c6e 396 return EBADEXEC;
0a7de745 397 }
398
399 /* check that codeslots fits in the buffer */
0a7de745 400 if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
39236c6e 401 return EBADEXEC;
0a7de745 402 }
39236c6e 403
404 if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
405 if (length < ntohl(cd->scatterOffset)) {
39236c6e 406 return EBADEXEC;
0a7de745 407 }
39236c6e 408
3e170ce0 409 const SC_Scatter *scatter = (const SC_Scatter *)
0a7de745 410 (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
411 uint32_t nPages = 0;
412
413 /*
414 * Check each scatter buffer, since we don't know the
415 * length of the scatter buffer array, we have to
416 * check each entry.
417 */
0a7de745 418 while (1) {
39236c6e 419 /* check that the end of each scatter buffer is within the length */
0a7de745 420 if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
39236c6e 421 return EBADEXEC;
0a7de745 422 }
39236c6e 423 uint32_t scount = ntohl(scatter->count);
0a7de745 424 if (scount == 0) {
39236c6e 425 break;
426 }
427 if (nPages + scount < nPages) {
39236c6e 428 return EBADEXEC;
0a7de745 429 }
430 nPages += scount;
431 scatter++;
432
433 /* XXX check that bases don't overlap */
434 /* XXX check that targetOffset doesn't overlap */
435 }
436#if 0 /* rdar://12579439 */
0a7de745 437 if (nPages != ntohl(cd->nCodeSlots)) {
39236c6e 438 return EBADEXEC;
0a7de745 439 }
440#endif
441 }
442
0a7de745 443 if (length < ntohl(cd->identOffset)) {
39236c6e 444 return EBADEXEC;
0a7de745 445 }
446
447 /* identifier is a NUL-terminated string */
448 if (cd->identOffset) {
3e170ce0 449 const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
0a7de745 450 if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
39236c6e 451 return EBADEXEC;
0a7de745 452 }
453 }
454
455 /* team identifier is a NUL-terminated string */
456 if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
0a7de745 457 if (length < ntohl(cd->teamOffset)) {
fe8ab488 458 return EBADEXEC;
0a7de745 459 }
fe8ab488 460
3e170ce0 461 const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
0a7de745 462 if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
fe8ab488 463 return EBADEXEC;
0a7de745 464 }
465 }
466
467 return 0;
468}
469
470/*
471 * Check that a generic blob and its declared length fit within the buffer.
472 */
473
474static int
475cs_validate_blob(const CS_GenericBlob *blob, size_t length)
476{
0a7de745 477 if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
39236c6e 478 return EBADEXEC;
0a7de745 479 }
480 return 0;
481}
482
483/*
484 * cs_validate_csblob
485 *
486 * Validate the superblob/embedded code directory to make sure that
487 * all internal pointers are valid.
488 *
489 * Will validate both a superblob csblob and a "raw" code directory.
490 *
491 *
492 * Parameters: buffer Pointer to code signature
493 * length Length of buffer
494 * rcd returns pointer to code directory
495 *
496 * Returns: 0 Success
497 * EBADEXEC Invalid code signature
498 */
499
500static int
501cs_validate_csblob(
502 const uint8_t *addr,
d9a64523 503 const size_t blob_size,
504 const CS_CodeDirectory **rcd,
505 const CS_GenericBlob **rentitlements)
39236c6e 506{
813fb2f6 507 const CS_GenericBlob *blob;
39236c6e 508 int error;
d9a64523 509 size_t length;
510
511 *rcd = NULL;
39037602 512 *rentitlements = NULL;
39236c6e 513
813fb2f6 514 blob = (const CS_GenericBlob *)(const void *)addr;
515
516 length = blob_size;
39236c6e 517 error = cs_validate_blob(blob, length);
0a7de745 518 if (error) {
39236c6e 519 return error;
0a7de745 520 }
521 length = ntohl(blob->length);
522
523 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
524 const CS_SuperBlob *sb;
525 uint32_t n, count;
526 const CS_CodeDirectory *best_cd = NULL;
527 unsigned int best_rank = 0;
528#if PLATFORM_WatchOS
529 const CS_CodeDirectory *sha1_cd = NULL;
530#endif
39236c6e 531
0a7de745 532 if (length < sizeof(CS_SuperBlob)) {
39236c6e 533 return EBADEXEC;
0a7de745 534 }
39236c6e 535
536 sb = (const CS_SuperBlob *)blob;
537 count = ntohl(sb->count);
538
39236c6e 539 /* check that the array of BlobIndex fits in the rest of the data */
0a7de745 540 if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
39236c6e 541 return EBADEXEC;
0a7de745 542 }
543
544 /* now check each BlobIndex */
545 for (n = 0; n < count; n++) {
546 const CS_BlobIndex *blobIndex = &sb->index[n];
547 uint32_t type = ntohl(blobIndex->type);
548 uint32_t offset = ntohl(blobIndex->offset);
0a7de745 549 if (length < offset) {
39236c6e 550 return EBADEXEC;
0a7de745 551 }
552
553 const CS_GenericBlob *subBlob =
0a7de745 554 (const CS_GenericBlob *)(const void *)(addr + offset);
39236c6e 555
490019cf 556 size_t subLength = length - offset;
39236c6e 557
0a7de745 558 if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
39236c6e 559 return error;
0a7de745 560 }
561 subLength = ntohl(subBlob->length);
562
563 /* extra validation for CDs, that is also returned */
564 if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
565 const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
0a7de745 566 if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
39236c6e 567 return error;
0a7de745 568 }
490019cf 569 unsigned int rank = hash_rank(candidate);
0a7de745 570 if (cs_debug > 3) {
490019cf 571 printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
0a7de745 572 }
573 if (best_cd == NULL || rank > best_rank) {
574 best_cd = candidate;
575 best_rank = rank;
39037602 576
0a7de745 577 if (cs_debug > 2) {
39037602 578 printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
0a7de745 579 }
39037602 580 *rcd = best_cd;
581 } else if (best_cd != NULL && rank == best_rank) {
582 /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
583 printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
584 return EBADEXEC;
585 }
586#if PLATFORM_WatchOS
587 if (candidate->hashType == CS_HASHTYPE_SHA1) {
588 if (sha1_cd != NULL) {
589 printf("multiple sha1 CodeDirectories in signature; rejecting\n");
590 return EBADEXEC;
591 }
592 sha1_cd = candidate;
593 }
594#endif
595 } else if (type == CSSLOT_ENTITLEMENTS) {
596 if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
597 return EBADEXEC;
598 }
599 if (*rentitlements != NULL) {
600 printf("multiple entitlements blobs\n");
601 return EBADEXEC;
602 }
39037602 603 *rentitlements = subBlob;
604 }
605 }
606
607#if PLATFORM_WatchOS
608 /* To keep watchOS fast enough, we have to resort to sha1 for
609 * some code.
610 *
611 * At the time of writing this comment, known sha1 attacks are
612 * collision attacks (not preimage or second preimage
613 * attacks), which do not apply to platform binaries since
614 * they have a fixed hash in the trust cache. Given this
615 * property, we only prefer sha1 code directories for adhoc
616 * signatures, which always have to be in a trust cache to be
617 * valid (can-load-cdhash does not exist for watchOS). Those
618 * are, incidentally, also the platform binaries, for which we
619 * care about the performance hit that sha256 would bring us.
620 *
621 * Platform binaries may still contain a (not chosen) sha256
622 * code directory, which keeps software updates that switch to
623 * sha256-only small.
624 */
625
626 if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
627 if (sha1_cd->flags != (*rcd)->flags) {
628 printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
0a7de745 629 (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
630 *rcd = NULL;
631 return EBADEXEC;
632 }
633
634 *rcd = sha1_cd;
635 }
636#endif
39236c6e 637 } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
0a7de745 638 if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
39236c6e 639 return error;
0a7de745 640 }
641 *rcd = (const CS_CodeDirectory *)blob;
642 } else {
643 return EBADEXEC;
644 }
645
0a7de745 646 if (*rcd == NULL) {
39236c6e 647 return EBADEXEC;
0a7de745 648 }
649
650 return 0;
651}
652
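/*
 * Illustrative sketch (compiled out, not in the original source):
 * validating a raw signature buffer and receiving the chosen code
 * directory and entitlements blob. `sig_addr' and `sig_size' are
 * hypothetical names.
 */
#if 0
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;

	if (cs_validate_csblob(sig_addr, sig_size, &cd, &entitlements) != 0) {
		return EBADEXEC; /* malformed or inconsistent code signature */
	}
#endif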
653/*
654 * cs_find_blob_bytes
655 *
656 * Find a blob in the superblob/code directory. The blob must have
657 * been validated by cs_validate_csblob() before calling
3e170ce0 658 * this. Use csblob_find_blob() instead.
0a7de745 659 *
660 * Will also find a "raw" code directory if it is stored that way, as well as
661 * searching the superblob.
662 *
663 * Parameters: buffer Pointer to code signature
664 * length Length of buffer
665 * type type of blob to find
666 * magic the magic number for that blob
667 *
668 * Returns: pointer Success
669 * NULL Buffer not found
670 */
671
672const CS_GenericBlob *
673csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
39236c6e 674{
3e170ce0 675 const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;
676
677 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
678 const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
679 size_t n, count = ntohl(sb->count);
680
681 for (n = 0; n < count; n++) {
0a7de745 682 if (ntohl(sb->index[n].type) != type) {
39236c6e 683 continue;
0a7de745 684 }
39236c6e 685 uint32_t offset = ntohl(sb->index[n].offset);
0a7de745 686 if (length - sizeof(const CS_GenericBlob) < offset) {
39236c6e 687 return NULL;
0a7de745 688 }
3e170ce0 689 blob = (const CS_GenericBlob *)(const void *)(addr + offset);
0a7de745 690 if (ntohl(blob->magic) != magic) {
39236c6e 691 continue;
0a7de745 692 }
693 return blob;
694 }
695 } else if (type == CSSLOT_CODEDIRECTORY
696 && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
697 && magic == CSMAGIC_CODEDIRECTORY) {
39236c6e 698 return blob;
0a7de745 699 }
700 return NULL;
701}
702
703
fe8ab488 704const CS_GenericBlob *
3e170ce0 705csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
39236c6e 706{
0a7de745 707 if ((csblob->csb_flags & CS_VALID) == 0) {
39236c6e 708 return NULL;
0a7de745 709 }
3e170ce0 710 return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
711}
712
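/*
 * Illustrative sketch (compiled out, not in the original source):
 * e.g. locating the embedded entitlements blob in an already-validated
 * csblob.
 */
#if 0
	const CS_GenericBlob *ents = csblob_find_blob(csblob,
	    CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
#endif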
713static const uint8_t *
3e170ce0 714find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
715{
716 /* there is no zero special slot since that is the first code slot */
0a7de745 717 if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
39236c6e 718 return NULL;
0a7de745 719 }
39236c6e 720
0a7de745 721 return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
722}
723
3e170ce0 724static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
39236c6e 725
6d2010ae 726int
3e170ce0 727csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
6d2010ae 728{
3e170ce0 729 uint8_t computed_hash[CS_HASH_MAX_SIZE];
730 const CS_GenericBlob *entitlements;
731 const CS_CodeDirectory *code_dir;
39236c6e 732 const uint8_t *embedded_hash;
3e170ce0 733 union cs_hash_union context;
734
735 *out_start = NULL;
736 *out_length = 0;
737
738 if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
739 return EBADEXEC;
740 }
39236c6e 741
490019cf 742 code_dir = csblob->csb_cd;
39236c6e 743
744 if ((csblob->csb_flags & CS_VALID) == 0) {
745 entitlements = NULL;
746 } else {
747 entitlements = csblob->csb_entitlements_blob;
748 }
3e170ce0 749 embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);
750
751 if (embedded_hash == NULL) {
0a7de745 752 if (entitlements) {
39236c6e 753 return EBADEXEC;
0a7de745 754 }
39236c6e 755 return 0;
756 } else if (entitlements == NULL) {
757 if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
758 return EBADEXEC;
759 } else {
760 return 0;
761 }
6d2010ae 762 }
39236c6e 763
764 csblob->csb_hashtype->cs_init(&context);
765 csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
766 csblob->csb_hashtype->cs_final(computed_hash, &context);
767
0a7de745 768 if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
39236c6e 769 return EBADEXEC;
0a7de745 770 }
39236c6e 771
3e170ce0 772 *out_start = __DECONST(void *, entitlements);
773 *out_length = ntohl(entitlements->length);
774
775 return 0;
776}
777
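/*
 * Illustrative sketch (compiled out, not in the original source):
 * retrieving the entitlements blob, which only succeeds once the blob
 * hashes to the code directory's CSSLOT_ENTITLEMENTS special slot.
 */
#if 0
	void *ents = NULL;
	size_t ents_len = 0;

	if (csblob_get_entitlements(csblob, &ents, &ents_len) == 0 && ents != NULL) {
		/* `ents' points at the verified CS_GenericBlob-framed entitlements */
	}
#endif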
6d2010ae 778/*
779 * CODESIGNING
780 * End of routines to navigate code signing data structures in the kernel.
781 */
782
783
2d21ac55 784
1c79356b 785/*
2d21ac55 786 * ubc_init
0a7de745 787 *
788 * Initialization of the zone for Unified Buffer Cache.
789 *
790 * Parameters: (void)
791 *
792 * Returns: (void)
793 *
794 * Implicit returns:
795 * ubc_info_zone(global) initialized for subsequent allocations
1c79356b 796 */
0b4e3aa0 797__private_extern__ void
2d21ac55 798ubc_init(void)
1c79356b 799{
0a7de745 800 int i;
1c79356b 801
0a7de745 802 i = (vm_size_t) sizeof(struct ubc_info);
2d21ac55 803
0a7de745 804 ubc_info_zone = zinit(i, 10000 * i, 8192, "ubc_info zone");
805
806 zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
807}
808
2d21ac55 809
1c79356b 810/*
811 * ubc_info_init
812 *
813 * Allocate and attach an empty ubc_info structure to a vnode
814 *
815 * Parameters: vp Pointer to the vnode
816 *
817 * Returns: 0 Success
818 * vnode_size:ENOMEM Not enough space
819 * vnode_size:??? Other error from vnode_getattr
820 *
821 */
822int
823ubc_info_init(struct vnode *vp)
91447636 824{
0a7de745 825 return ubc_info_init_internal(vp, 0, 0);
91447636 826}
827
828
829/*
830 * ubc_info_init_withsize
831 *
832 * Allocate and attach a sized ubc_info structure to a vnode
833 *
834 * Parameters: vp Pointer to the vnode
835 * filesize The size of the file
836 *
837 * Returns: 0 Success
838 * vnode_size:ENOMEM Not enough space
839 * vnode_size:??? Other error from vnode_getattr
840 */
841int
842ubc_info_init_withsize(struct vnode *vp, off_t filesize)
843{
0a7de745 844 return ubc_info_init_internal(vp, 1, filesize);
845}
846
847
848/*
849 * ubc_info_init_internal
850 *
851 * Allocate and attach a ubc_info structure to a vnode
852 *
853 * Parameters: vp Pointer to the vnode
854 * withfsize{0,1} Zero if the size should be obtained
855 * from the vnode; otherwise, use filesize
856 * filesize The size of the file, if withfsize == 1
857 *
858 * Returns: 0 Success
859 * vnode_size:ENOMEM Not enough space
860 * vnode_size:??? Other error from vnode_getattr
861 *
862 * Notes: We call a blocking zalloc(), and the zone was created as an
863 * expandable and collectable zone, so if no memory is available,
864 * it is possible for zalloc() to block indefinitely. zalloc()
865 * may also panic if the zone of zones is exhausted, since it's
866 * NOT expandable.
867 *
868 * We unconditionally call vnode_pager_setup(), even if this is
869 * a reuse of a ubc_info; in that case, we should probably assert
870 * that it does not already have a pager association, but do not.
871 *
872 * Since memory_object_create_named() can only fail from receiving
873 * an invalid pager argument, the explicit check and panic is
874 * merely precautionary.
875 */
876static int
877ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
1c79356b 878{
0a7de745 879 struct ubc_info *uip;
1c79356b 880 void * pager;
881 int error = 0;
882 kern_return_t kret;
0b4e3aa0 883 memory_object_control_t control;
1c79356b 884
91447636 885 uip = vp->v_ubcinfo;
1c79356b 886
887 /*
888 * If there is not already a ubc_info attached to the vnode, we
889 * attach one; otherwise, we will reuse the one that's there.
890 */
91447636 891 if (uip == UBC_INFO_NULL) {
1c79356b 892 uip = (struct ubc_info *) zalloc(ubc_info_zone);
893 bzero((char *)uip, sizeof(struct ubc_info));
894
1c79356b 895 uip->ui_vnode = vp;
91447636 896 uip->ui_flags = UI_INITED;
897 uip->ui_ucred = NOCRED;
898 }
899 assert(uip->ui_flags != UI_NONE);
900 assert(uip->ui_vnode == vp);
901
902 /* now set this ubc_info in the vnode */
903 vp->v_ubcinfo = uip;
91447636 904
905 /*
906 * Allocate a pager object for this vnode
907 *
908 * XXX The value of the pager parameter is currently ignored.
909 * XXX Presumably, this API changed to avoid the race between
910 * XXX setting the pager and the UI_HASPAGER flag.
911 */
912 pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
913 assert(pager);
91447636 914
915 /*
916 * Explicitly set the pager into the ubc_info, after setting the
917 * UI_HASPAGER flag.
918 */
919 SET(uip->ui_flags, UI_HASPAGER);
920 uip->ui_pager = pager;
921
922 /*
91447636 923 * Note: We can not use VNOP_GETATTR() to get accurate
924 * value of ui_size because this may be an NFS vnode, and
925 * nfs_getattr() can call vinvalbuf(); if this happens,
926 * ubc_info is not set up to deal with that event.
927 * So use bogus size.
928 */
929
1c79356b 930 /*
931 * create a vnode - vm_object association
932 * memory_object_create_named() creates a "named" reference on the
933 * memory object we hold this reference as long as the vnode is
934 * "alive." Since memory_object_create_named() took its own reference
935 * on the vnode pager we passed it, we can drop the reference
936 * vnode_pager_setup() returned here.
1c79356b 937 */
0b4e3aa0 938 kret = memory_object_create_named(pager,
939 (memory_object_size_t)uip->ui_size, &control);
940 vnode_pager_deallocate(pager);
941 if (kret != KERN_SUCCESS) {
0b4e3aa0 942 panic("ubc_info_init: memory_object_create_named returned %d", kret);
0a7de745 943 }
1c79356b 944
0b4e3aa0 945 assert(control);
946 uip->ui_control = control; /* cache the value of the mo control */
947 SET(uip->ui_flags, UI_HASOBJREF); /* with a named reference */
2d21ac55 948
91447636 949 if (withfsize == 0) {
91447636 950 /* initialize the size */
2d21ac55 951 error = vnode_size(vp, &uip->ui_size, vfs_context_current());
0a7de745 952 if (error) {
91447636 953 uip->ui_size = 0;
0a7de745 954 }
955 } else {
956 uip->ui_size = filesize;
957 }
0a7de745 958 vp->v_lflag |= VNAMED_UBC; /* vnode has a named ubc reference */
1c79356b 959
0a7de745 960 return error;
961}
962
963
964/*
965 * ubc_info_free
966 *
967 * Free a ubc_info structure
968 *
969 * Parameters: uip A pointer to the ubc_info to free
970 *
971 * Returns: (void)
972 *
973 * Notes: If there is a credential that has subsequently been associated
974 * with the ubc_info via a call to ubc_setcred(), the reference
975 * to the credential is dropped.
976 *
977 * It's actually impossible for a ubc_info.ui_control to take the
978 * value MEMORY_OBJECT_CONTROL_NULL.
979 */
980static void
981ubc_info_free(struct ubc_info *uip)
1c79356b 982{
983 if (IS_VALID_CRED(uip->ui_ucred)) {
984 kauth_cred_unref(&uip->ui_ucred);
1c79356b 985 }
0b4e3aa0 986
0a7de745 987 if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
0b4e3aa0 988 memory_object_control_deallocate(uip->ui_control);
989 }
990
91447636 991 cluster_release(uip);
2d21ac55 992 ubc_cs_free(uip);
0b4e3aa0 993
2d21ac55 994 zfree(ubc_info_zone, uip);
995 return;
996}
997
2d21ac55 998
999void
1000ubc_info_deallocate(struct ubc_info *uip)
1001{
0a7de745 1002 ubc_info_free(uip);
1003}
1004
1005errno_t
1006mach_to_bsd_errno(kern_return_t mach_err)
1007{
1008 switch (mach_err) {
1009 case KERN_SUCCESS:
1010 return 0;
1011
1012 case KERN_INVALID_ADDRESS:
1013 case KERN_INVALID_ARGUMENT:
1014 case KERN_NOT_IN_SET:
1015 case KERN_INVALID_NAME:
1016 case KERN_INVALID_TASK:
1017 case KERN_INVALID_RIGHT:
1018 case KERN_INVALID_VALUE:
1019 case KERN_INVALID_CAPABILITY:
1020 case KERN_INVALID_HOST:
1021 case KERN_MEMORY_PRESENT:
1022 case KERN_INVALID_PROCESSOR_SET:
1023 case KERN_INVALID_POLICY:
1024 case KERN_ALREADY_WAITING:
1025 case KERN_DEFAULT_SET:
1026 case KERN_EXCEPTION_PROTECTED:
1027 case KERN_INVALID_LEDGER:
1028 case KERN_INVALID_MEMORY_CONTROL:
1029 case KERN_INVALID_SECURITY:
1030 case KERN_NOT_DEPRESSED:
1031 case KERN_LOCK_OWNED:
1032 case KERN_LOCK_OWNED_SELF:
1033 return EINVAL;
1034
1035 case KERN_PROTECTION_FAILURE:
1036 case KERN_NOT_RECEIVER:
1037 case KERN_NO_ACCESS:
1038 case KERN_POLICY_STATIC:
1039 return EACCES;
1040
1041 case KERN_NO_SPACE:
1042 case KERN_RESOURCE_SHORTAGE:
1043 case KERN_UREFS_OVERFLOW:
1044 case KERN_INVALID_OBJECT:
1045 return ENOMEM;
1046
1047 case KERN_FAILURE:
1048 return EIO;
1049
1050 case KERN_MEMORY_FAILURE:
1051 case KERN_POLICY_LIMIT:
1052 case KERN_CODESIGN_ERROR:
1053 return EPERM;
1054
1055 case KERN_MEMORY_ERROR:
1056 return EBUSY;
1057
1058 case KERN_ALREADY_IN_SET:
1059 case KERN_NAME_EXISTS:
1060 case KERN_RIGHT_EXISTS:
1061 return EEXIST;
1062
1063 case KERN_ABORTED:
1064 return EINTR;
1065
1066 case KERN_TERMINATED:
1067 case KERN_LOCK_SET_DESTROYED:
1068 case KERN_LOCK_UNSTABLE:
1069 case KERN_SEMAPHORE_DESTROYED:
1070 return ENOENT;
1071
1072 case KERN_RPC_SERVER_TERMINATED:
1073 return ECONNRESET;
1074
1075 case KERN_NOT_SUPPORTED:
1076 return ENOTSUP;
1077
1078 case KERN_NODE_DOWN:
1079 return ENETDOWN;
1080
1081 case KERN_NOT_WAITING:
1082 return ENOENT;
1083
1084 case KERN_OPERATION_TIMED_OUT:
1085 return ETIMEDOUT;
1086
1087 default:
1088 return EIO;
1089 }
1090}
2d21ac55 1091
1c79356b 1092/*
fe8ab488 1093 * ubc_setsize_ex
2d21ac55 1094 *
fe8ab488 1095 * Tell the VM that the size of the file represented by the vnode has
1096 * changed
1097 *
1098 * Parameters: vp The vp whose backing file size is
1099 * being changed
1100 * nsize The new size of the backing file
1101 * opts Options
1102 *
1103 * Returns: EINVAL for new size < 0
1104 * ENOENT if no UBC info exists
1105 * EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1106 * Other errors (mapped to errno_t) returned by VM functions
1107 *
1108 * Notes: This function will indicate success if the new size is the
1109 * same or larger than the old size (in this case, the
1110 * remainder of the file will require modification or use of
1111 * an existing upl to access successfully).
1112 *
1113 * This function will fail if the new file size is smaller,
1114 * and the memory region being invalidated was unable to
1115 * actually be invalidated and/or the last page could not be
1116 * flushed, if the new size is not aligned to a page
1117 * boundary. This is usually indicative of an I/O error.
1c79356b 1118 */
1119errno_t
1120ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
1c79356b 1121{
0a7de745 1122 off_t osize; /* ui_size before change */
1123 off_t lastpg, olastpgend, lastoff;
1124 struct ubc_info *uip;
0b4e3aa0 1125 memory_object_control_t control;
2d21ac55 1126 kern_return_t kret = KERN_SUCCESS;
1c79356b 1127
0a7de745 1128 if (nsize < (off_t)0) {
fe8ab488 1129 return EINVAL;
0a7de745 1130 }
1c79356b 1131
0a7de745 1132 if (!UBCINFOEXISTS(vp)) {
fe8ab488 1133 return ENOENT;
0a7de745 1134 }
1135
1136 uip = vp->v_ubcinfo;
2d21ac55 1137 osize = uip->ui_size;
fe8ab488 1138
0a7de745 1139 if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
fe8ab488 1140 return EAGAIN;
0a7de745 1141 }
fe8ab488 1142
1143 /*
1144 * Update the size before flushing the VM
1145 */
1146 uip->ui_size = nsize;
1147
0a7de745 1148 if (nsize >= osize) { /* Nothing more to do */
1149 if (nsize > osize) {
1150 lock_vnode_and_post(vp, NOTE_EXTEND);
1151 }
1152
fe8ab488 1153 return 0;
b0d623f7 1154 }
1155
1156 /*
1157 * When the file shrinks, invalidate the pages beyond the
1158 * new size. Also get rid of garbage beyond nsize on the
1159 * last page. The ui_size already has the nsize, so any
1160 * subsequent page-in will zero-fill the tail properly
1c79356b 1161 */
1162 lastpg = trunc_page_64(nsize);
1163 olastpgend = round_page_64(osize);
1164 control = uip->ui_control;
1165 assert(control);
1166 lastoff = (nsize & PAGE_MASK_64);
1167
2d21ac55 1168 if (lastoff) {
1169 upl_t upl;
1170 upl_page_info_t *pl;
2d21ac55 1171
fe8ab488 1172 /*
2d21ac55 1173 * new EOF ends up in the middle of a page
fe8ab488 1174 * zero the tail of this page if it's currently
1175 * present in the cache
1176 */
b226f5e5 1177 kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);
fe8ab488 1178
1179 if (kret != KERN_SUCCESS) {
1180 panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);
1181 }
2d21ac55 1182
1183 if (upl_valid_page(pl, 0)) {
1184 cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
1185 }
1186
1187 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
1c79356b 1188
1189 lastpg += PAGE_SIZE_64;
1190 }
1191 if (olastpgend > lastpg) {
0a7de745 1192 int flags;
b0d623f7 1193
0a7de745 1194 if (lastpg == 0) {
b0d623f7 1195 flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
0a7de745 1196 } else {
b0d623f7 1197 flags = MEMORY_OBJECT_DATA_FLUSH;
0a7de745 1198 }
fe8ab488 1199 /*
1200 * invalidate the pages beyond the new EOF page
1201 *
1202 */
fe8ab488 1203 kret = memory_object_lock_request(control,
1204 (memory_object_offset_t)lastpg,
1205 (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
1206 MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
1207 if (kret != KERN_SUCCESS) {
1208 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
1209 }
2d21ac55 1210 }
fe8ab488 1211 return mach_to_bsd_errno(kret);
1212}
1213
fe8ab488 1214// Returns true for success
1215int
1216ubc_setsize(vnode_t vp, off_t nsize)
1217{
1218 return ubc_setsize_ex(vp, nsize, 0) == 0;
1219}
2d21ac55 1220
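/*
 * Illustrative sketch (compiled out, not in the original source): a
 * filesystem shrinking a file would update its on-disk size and then
 * inform the UBC; note the inverted, boolean-style return of
 * ubc_setsize(). `new_size' is a hypothetical name.
 */
#if 0
	if (!ubc_setsize(vp, new_size)) {
		error = EIO; /* pages beyond the new EOF could not be invalidated */
	}
#endif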
1c79356b 1221/*
1222 * ubc_getsize
1223 *
1224 * Get the size of the file associated with the specified vnode
1225 *
1226 * Parameters: vp The vnode whose size is of interest
1227 *
1228 * Returns: 0 There is no ubc_info associated with
1229 * this vnode, or the size is zero
1230 * !0 The size of the file
1231 *
1232 * Notes: Using this routine, it is not possible for a caller to
1233 * successfully distinguish between a vnode associated with a zero
1234 * length file, and a vnode with no associated ubc_info. The
1235 * caller therefore needs to not care, or needs to ensure that
1236 * they have previously successfully called ubc_info_init() or
1237 * ubc_info_init_withsize().
1238 */
1239off_t
1240ubc_getsize(struct vnode *vp)
1241{
91447636 1242 /* people depend on the side effect of this working this way
0a7de745 1243 * as they call this for directories
1c79356b 1244 */
1245 if (!UBCINFOEXISTS(vp)) {
1246 return (off_t)0;
1247 }
1248 return vp->v_ubcinfo->ui_size;
1249}
1250
2d21ac55 1251
1c79356b 1252/*
1253 * ubc_umount
1254 *
fe8ab488 1255 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
1256 * mount point
1257 *
1258 * Parameters: mp The mount point
1259 *
1260 * Returns: 0 Success
1261 *
1262 * Notes: There is no failure indication for this function.
1263 *
1264 * This function is used in the unmount path; since it may block
1265 * I/O indefinitely, it should not be used in the forced unmount
1266 * path, since a device unavailability could also block that
1267 * indefinitely.
1268 *
1269 * Because there is no device ejection interlock on USB, FireWire,
1270 * or similar devices, it's possible that an ejection that begins
1271 * subsequent to the vnode_iterate() completing, either on one of
1272 * those devices, or a network mount for which the server quits
1273 * responding, etc., may cause the caller to block indefinitely.
1c79356b 1274 */
0b4e3aa0 1275__private_extern__ int
1276ubc_umount(struct mount *mp)
1277{
91447636 1278 vnode_iterate(mp, 0, ubc_umcallback, 0);
0a7de745 1279 return 0;
1280}
1281
1282
1283/*
1284 * ubc_umcallback
1285 *
1286 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1287 * and vnode_iterate() for details of implementation.
1288 */
1289static int
1290ubc_umcallback(vnode_t vp, __unused void * args)
1c79356b 1291{
91447636 1292 if (UBCINFOEXISTS(vp)) {
91447636 1293 (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1c79356b 1294 }
0a7de745 1295 return VNODE_RETURNED;
1296}
1297
91447636 1298
1299/*
1300 * ubc_getcred
1301 *
1302 * Get the credentials currently active for the ubc_info associated with the
1303 * vnode.
1304 *
1305 * Parameters: vp The vnode whose ubc_info credentials
1306 * are to be retrieved
1307 *
1308 * Returns: !NOCRED The credentials
1309 * NOCRED If there is no ubc_info for the vnode,
1310 * or if there is one, but it has not had
1311 * any credentials associated with it via
1312 * a call to ubc_setcred()
1313 */
91447636 1314kauth_cred_t
1315ubc_getcred(struct vnode *vp)
1316{
1317 if (UBCINFOEXISTS(vp)) {
1318 return vp->v_ubcinfo->ui_ucred;
1319 }
1c79356b 1320
0a7de745 1321 return NOCRED;
1322}
1323
1324
1325/*
1326 * ubc_setthreadcred
1327 *
1328 * If they are not already set, set the credentials of the ubc_info structure
1329 * associated with the vnode to those of the supplied thread; otherwise leave
1330 * them alone.
1331 *
1332 * Parameters: vp The vnode whose ubc_info creds are to
1333 * be set
1334 * p The process whose credentials are to
1335 * be used, if not running on an assumed
1336 * credential
1337 * thread The thread whose credentials are to
1338 * be used
1339 *
1340 * Returns: 1 This vnode has no associated ubc_info
1341 * 0 Success
1342 *
1343 * Notes: This function takes a proc parameter to account for bootstrap
1344 * issues where a task or thread may call this routine, either
1345 * before credentials have been initialized by bsd_init(), or if
1346 * there is no BSD info associated with a Mach thread yet. This
1347 * is known to happen in both the initial swap and memory mapping
1348 * calls.
1349 *
1350 * This function is generally used only in the following cases:
1351 *
1352 * o a memory mapped file via the mmap() system call
1353 * o a swap store backing file
1354 * o subsequent to a successful write via vn_write()
1355 *
1356 * The information is then used by the NFS client in order to
1357 * cons up a wire message in either the page-in or page-out path.
1358 *
1359 * There are two potential problems with the use of this API:
1360 *
1361 * o Because the write path only set it on a successful
1362 * write, there is a race window between setting the
1363 * credential and its use to evict the pages to the
1364 * remote file server
1365 *
1366 * o Because a page-in may occur prior to a write, the
1367 * credential may not be set at this time, if the page-in
fe8ab488 1368 * is not the result of a mapping established via mmap().
1369 *
1370 * In both these cases, this will be triggered from the paging
1371 * path, which will instead use the credential of the current
1372 * process, which in this case is either the dynamic_pager or
1373 * the kernel task, both of which utilize "root" credentials.
1374 *
1375 * This may potentially permit operations to occur which should
1376 * be denied, or it may cause to be denied operations which
1377 * should be permitted, depending on the configuration of the NFS
1378 * server.
1379 */
13fec989 1380int
2d21ac55 1381ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
1382{
1383 struct ubc_info *uip;
1384 kauth_cred_t credp;
2d21ac55 1385 struct uthread *uthread = get_bsdthread_info(thread);
13fec989 1386
1387 if (!UBCINFOEXISTS(vp)) {
1388 return 1;
1389 }
1390
1391 vnode_lock(vp);
1392
1393 uip = vp->v_ubcinfo;
1394 credp = uip->ui_ucred;
1395
0c530ab8 1396 if (!IS_VALID_CRED(credp)) {
1397 /* use per-thread cred, if assumed identity, else proc cred */
1398 if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
1399 uip->ui_ucred = kauth_cred_proc_ref(p);
1400 } else {
1401 uip->ui_ucred = uthread->uu_ucred;
1402 kauth_cred_ref(uip->ui_ucred);
1403 }
0a7de745 1404 }
1405 vnode_unlock(vp);
1406
0a7de745 1407 return 0;
1408}
1409
2d21ac55 1410
1c79356b 1411/*
1412 * ubc_setcred
1413 *
1414 * If they are not already set, set the credentials of the ubc_info structure
1415 * associated with the vnode to those of the process; otherwise leave them
1416 * alone.
1417 *
1418 * Parameters: vp The vnode whose ubc_info creds are to
1419 * be set
1420 * p The process whose credentials are to
1421 * be used
1422 *
1423 * Returns: 0 This vnode has no associated ubc_info
1424 * 1 Success
1425 *
1426 * Notes: The return values for this function are inverted from nearly
1427 * all other uses in the kernel.
1428 *
1429 * See also ubc_setthreadcred(), above.
1430 *
1431 * This function is considered deprecated, and generally should
1432 * not be used, as it is incompatible with per-thread credentials;
1433 * it exists for legacy KPI reasons.
1434 *
0a7de745 1435 * DEPRECATION: ubc_setcred() is being deprecated. Please use
2d21ac55 1436 * ubc_setthreadcred() instead.
1c79356b 1437 */
1c79356b 1438int
2d21ac55 1439ubc_setcred(struct vnode *vp, proc_t p)
1440{
1441 struct ubc_info *uip;
91447636 1442 kauth_cred_t credp;
1c79356b 1443
2d21ac55 1444 /* If there is no ubc_info, deny the operation */
1445 if (!UBCINFOEXISTS(vp)) {
1446 return 0;
1447 }
1c79356b 1448
1449 /*
1450 * Check to see if there is already a credential reference in the
1451 * ubc_info; if there is not, take one on the supplied credential.
1452 */
91447636 1453 vnode_lock(vp);
91447636 1454 uip = vp->v_ubcinfo;
1c79356b 1455 credp = uip->ui_ucred;
0c530ab8 1456 if (!IS_VALID_CRED(credp)) {
91447636 1457 uip->ui_ucred = kauth_cred_proc_ref(p);
0a7de745 1458 }
91447636 1459 vnode_unlock(vp);
1c79356b 1460
0a7de745 1461 return 1;
1462}
1463
1464/*
1465 * ubc_getpager
1466 *
1467 * Get the pager associated with the ubc_info associated with the vnode.
1468 *
1469 * Parameters: vp The vnode to obtain the pager from
1470 *
1471 * Returns: !VNODE_PAGER_NULL The memory_object_t for the pager
1472 * VNODE_PAGER_NULL There is no ubc_info for this vnode
1473 *
1474 * Notes: For each vnode that has a ubc_info associated with it, that
1475 * ubc_info SHALL have a pager associated with it, so in the
1476 * normal case, it's impossible to return VNODE_PAGER_NULL for
1477 * a vnode with an associated ubc_info.
1478 */
0b4e3aa0 1479__private_extern__ memory_object_t
1480ubc_getpager(struct vnode *vp)
1481{
1482 if (UBCINFOEXISTS(vp)) {
1483 return vp->v_ubcinfo->ui_pager;
1484 }
1c79356b 1485
0a7de745 1486 return 0;
1487}
1488
2d21ac55 1489
1c79356b 1490/*
1491 * ubc_getobject
1492 *
1493 * Get the memory object control associated with the ubc_info associated with
1494 * the vnode
1495 *
1496 * Parameters: vp The vnode to obtain the memory object
1497 * from
1498 * flags DEPRECATED
1499 *
1500 * Returns: !MEMORY_OBJECT_CONTROL_NULL
1501 * MEMORY_OBJECT_CONTROL_NULL
1502 *
1503 * Notes: Historically, if the flags were not "do not reactivate", this
1504 * function would look up the memory object using the pager if
1505 * it did not exist (this could be the case if the vnode had
1506 * been previously reactivated). The flags would also permit a
1507 * hold to be requested, which would have created an object
1508 * reference, if one had not already existed. This usage is
1509 * deprecated, as it would permit a race between finding and
1510 * taking the reference vs. a single reference being dropped in
1511 * another thread.
1c79356b 1512 */
0b4e3aa0 1513memory_object_control_t
91447636 1514ubc_getobject(struct vnode *vp, __unused int flags)
1c79356b 1515{
1516 if (UBCINFOEXISTS(vp)) {
1517 return vp->v_ubcinfo->ui_control;
1518 }
1c79356b 1519
0a7de745 1520 return MEMORY_OBJECT_CONTROL_NULL;
1521}
1522
1523/*
1524 * ubc_blktooff
1525 *
1526 * Convert a given block number to a memory backing object (file) offset for a
1527 * given vnode
1528 *
1529 * Parameters: vp The vnode in which the block is located
1530 * blkno The block number to convert
1531 *
1532 * Returns: !-1 The offset into the backing object
1533 * -1 There is no ubc_info associated with
1534 * the vnode
1535 * -1 An error occurred in the underlying VFS
1536 * while translating the block to an
1537 * offset; the most likely cause is that
1538 * the caller specified a block past the
1539 * end of the file, but this could also be
1540 * any other error from VNOP_BLKTOOFF().
1541 *
1542 * Note: Representing the error in band loses some information, but does
1543 * not occlude a valid offset, since an off_t of -1 is normally
1544 * used to represent EOF. If we had a more reliable constant in
1545 * our header files for it (i.e. explicitly cast to an off_t), we
1546 * would use it here instead.
1547 */
1c79356b 1548off_t
91447636 1549ubc_blktooff(vnode_t vp, daddr64_t blkno)
1c79356b 1550{
2d21ac55 1551 off_t file_offset = -1;
1552 int error;
1553
1554 if (UBCINFOEXISTS(vp)) {
1555 error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
0a7de745 1556 if (error) {
2d21ac55 1557 file_offset = -1;
0a7de745 1558 }
2d21ac55 1559 }
1c79356b 1560
0a7de745 1561 return file_offset;
1c79356b 1562}
0b4e3aa0 1563
1564
1565/*
1566 * ubc_offtoblk
1567 *
1568 * Convert a given offset in a memory backing object into a block number for a
1569 * given vnode
1570 *
1571 * Parameters: vp The vnode in which the offset is
1572 * located
1573 * offset The offset into the backing object
1574 *
1575 * Returns: !-1 The returned block number
1576 * -1 There is no ubc_info associated with
1577 * the vnode
1578 * -1 An error occurred in the underlying VFS
1579 * while translating the block to an
1580 * offset; the most likely cause is that
1581 * the caller specified a block past the
1582 * end of the file, but this could also be
1583 * any other error from VNOP_OFFTOBLK().
1584 *
1585 * Note: Representing the error in band loses some information, but does
1586 * not occlude a valid block number, since block numbers exceed
1587 * the valid range for offsets, due to their relative sizes. If
1588 * we had a more reliable constant than -1 in our header files
1589 * for it (i.e. explicitly cast to an daddr64_t), we would use it
1590 * here instead.
1591 */
1592daddr64_t
1593ubc_offtoblk(vnode_t vp, off_t offset)
1c79356b 1594{
2d21ac55 1595 daddr64_t blkno = -1;
0b4e3aa0 1596 int error = 0;
1c79356b 1597
1598 if (UBCINFOEXISTS(vp)) {
1599 error = VNOP_OFFTOBLK(vp, offset, &blkno);
0a7de745 1600 if (error) {
2d21ac55 1601 blkno = -1;
0a7de745 1602 }
2d21ac55 1603 }
1c79356b 1604
0a7de745 1605 return blkno;
1606}
1607
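/*
 * Illustrative sketch (compiled out, not in the original source): both
 * translations report errors in band as -1. `f_offset' is a
 * hypothetical name.
 */
#if 0
	daddr64_t blkno = ubc_offtoblk(vp, f_offset); /* -1 on error */
	off_t offset = ubc_blktooff(vp, blkno); /* -1 on error */
#endif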
1608
1609/*
1610 * ubc_pages_resident
1611 *
1612 * Determine whether or not a given vnode has pages resident via the memory
1613 * object control associated with the ubc_info associated with the vnode
1614 *
1615 * Parameters: vp The vnode we want to know about
1616 *
1617 * Returns: 1 Yes
1618 * 0 No
1619 */
1c79356b 1620int
91447636 1621ubc_pages_resident(vnode_t vp)
1c79356b 1622{
1623 kern_return_t kret;
1624 boolean_t has_pages_resident;
1625
1626 if (!UBCINFOEXISTS(vp)) {
1627 return 0;
1628 }
1629
1630 /*
1631 * The following call may fail if an invalid ui_control is specified,
1632 * or if there is no VM object associated with the control object. In
1633 * either case, reacting to it as if there were no pages resident will
1634 * result in correct behavior.
1635 */
91447636 1636 kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
1637
1638 if (kret != KERN_SUCCESS) {
1639 return 0;
1640 }
1641
1642 if (has_pages_resident == TRUE) {
1643 return 1;
1644 }
1645
1646 return 0;
91447636 1647}
1c79356b 1648
0b4e3aa0 1649/*
1650 * ubc_msync
1651 *
1652 * Clean and/or invalidate a range in the memory object that backs this vnode
1653 *
1654 * Parameters: vp The vnode whose associated ubc_info's
1655 * associated memory object is to have a
1656 * range invalidated within it
1657 * beg_off The start of the range, as an offset
1658 * end_off The end of the range, as an offset
1659 * resid_off The address of an off_t supplied by the
1660 * caller; may be set to NULL to ignore
1661 * flags See ubc_msync_internal()
1662 *
1663 * Returns: 0 Success
1664 * !0 Failure; an errno is returned
1665 *
1666 * Implicit Returns:
1667 * *resid_off, modified If non-NULL, the contents are ALWAYS
1668 * modified; they are initialized to the
1669 * beg_off, and in case of an I/O error,
1670 * the difference between beg_off and the
1671 * current value will reflect what was
1672 * able to be written before the error
1673 * occurred. If no error is returned, the
1674 * value of the resid_off is undefined; do
1675 * NOT use it in place of end_off if you
1676 * intend to increment from the end of the
1677 * last call and call iteratively.
1678 *
1679 * Notes: see ubc_msync_internal() for more detailed information.
1680 *
0b4e3aa0 1681 */
1682errno_t
1683ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
0b4e3aa0 1684{
0a7de745 1685 int retval;
91447636 1686 int io_errno = 0;
0b4e3aa0 1687
1688 if (resid_off) {
1689 *resid_off = beg_off;
1690 }
1691
1692 retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
0b4e3aa0 1693
1694 if (retval == 0 && io_errno == 0) {
1695 return EINVAL;
1696 }
1697 return io_errno;
91447636 1698}
0b4e3aa0 1699
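/*
 * Illustrative sketch (compiled out, not in the original source): push
 * every dirty page of a file and wait for the I/O, much as the unmount
 * path above does with UBC_PUSHALL.
 */
#if 0
	off_t resid = 0;
	errno_t error = ubc_msync(vp, (off_t)0, ubc_getsize(vp), &resid,
	    UBC_PUSHDIRTY | UBC_SYNC);
#endif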
1c79356b 1700
1c79356b 1701/*
1702 * ubc_msync_internal
1703 *
1704 * Clean and/or invalidate a range in the memory object that backs this vnode
1705 *
1706 * Parameters: vp The vnode whose associated ubc_info's
1707 * associated memory object is to have a
1708 * range invalidated within it
1709 * beg_off The start of the range, as an offset
1710 * end_off The end of the range, as an offset
1711 * resid_off The address of an off_t supplied by the
1712 * caller; may be set to NULL to ignore
1713 * flags MUST contain at least one of the flags
1714 * UBC_INVALIDATE, UBC_PUSHDIRTY, or
1715 * UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1716 * UBC_SYNC may also be specified to cause
1717 * this function to block until the
1718 * operation is complete. The behavior
1719 * of UBC_SYNC is otherwise undefined.
1720 * io_errno The address of an int to contain the
1721 * errno from a failed I/O operation, if
1722 * one occurs; may be set to NULL to
1723 * ignore
1724 *
1725 * Returns: 1 Success
1726 * 0 Failure
1727 *
1728 * Implicit Returns:
1729 * *resid_off, modified The contents of this offset MAY be
1730 * modified; in case of an I/O error, the
1731 * difference between beg_off and the
1732 * current value will reflect what was
1733 * able to be written before the error
1734 * occurred.
1735 * *io_errno, modified The contents of this offset are set to
1736 * an errno, if an error occurs; if the
1737 * caller supplies an io_errno parameter,
1738 * they should be careful to initialize it
1739 * to 0 before calling this function to
1740 * enable them to distinguish an error
1741 * with a valid *resid_off from an invalid
1742 * one, and to avoid potentially falsely
1743 * reporting an error, depending on use.
1744 *
1745 * Notes: If there is no ubc_info associated with the vnode supplied,
1746 * this function immediately returns 0 (failure, with no *io_errno set).
1747 *
1748 * If the value of end_off is less than or equal to beg_off, this
1749 * function immediately returns success; that is, end_off is NOT
1750 * inclusive.
1751 *
1752 * IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1753 * UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1754 * attempt to block on in-progress I/O by calling this function
1755 * with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1756 * in order to block pending on the I/O already in progress.
1757 *
1758 * The start offset is truncated to the page boundary and the
1759 * size is adjusted to include the last page in the range; that
1760 * is, end_off on exactly a page boundary will not change if it
1761 * is rounded, and the range of bytes written will be from the
1762 * truncated beg_off to the rounded (end_off - 1).
1c79356b 1763 */
91447636
A
1764static int
1765ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
1c79356b 1766{
0a7de745
A
1767 memory_object_size_t tsize;
1768 kern_return_t kret;
91447636
A
1769 int request_flags = 0;
1770 int flush_flags = MEMORY_OBJECT_RETURN_NONE;
0a7de745
A
1771
1772 if (!UBCINFOEXISTS(vp)) {
1773 return 0;
1774 }
1775 if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
1776 return 0;
1777 }
1778 if (end_off <= beg_off) {
1779 return 1;
1780 }
1781
1782 if (flags & UBC_INVALIDATE) {
1783 /*
91447636
A
1784 * discard the resident pages
1785 */
1786 request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
0a7de745 1787 }
1c79356b 1788
0a7de745
A
1789 if (flags & UBC_SYNC) {
1790 /*
91447636 1791 * wait for all the I/O to complete before returning
55e303ae 1792 */
0a7de745
A
1793 request_flags |= MEMORY_OBJECT_IO_SYNC;
1794 }
55e303ae 1795
0a7de745
A
1796 if (flags & UBC_PUSHDIRTY) {
1797 /*
91447636
A
1798 * we only return the dirty pages in the range
1799 */
0a7de745
A
1800 flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
1801 }
0b4e3aa0 1802
0a7de745
A
1803 if (flags & UBC_PUSHALL) {
1804 /*
2d21ac55
A
1805 * then return all the interesting pages in the range (both
1806 * dirty and precious) to the pager
91447636 1807 */
0a7de745
A
1808 flush_flags = MEMORY_OBJECT_RETURN_ALL;
1809 }
0b4e3aa0 1810
91447636
A
1811 beg_off = trunc_page_64(beg_off);
1812 end_off = round_page_64(end_off);
1813 tsize = (memory_object_size_t)end_off - beg_off;
b4c24cb9 1814
91447636
A
1815 /* flush and/or invalidate pages in the range requested */
1816 kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
0a7de745
A
1817 beg_off, tsize,
1818 (memory_object_offset_t *)resid_off,
1819 io_errno, flush_flags, request_flags,
1820 VM_PROT_NO_CHANGE);
1821
1822 return (kret == KERN_SUCCESS) ? 1 : 0;
1c79356b
A
1823}
1824
1c79356b
A
1825
1826/*
fe8ab488 1827 * ubc_map
2d21ac55
A
1828 *
1829 * Explicitly map a vnode that has an associated ubc_info, and add a reference
1830 * to it for the ubc system, if there isn't one already, so it will not be
1831 * recycled while it's in use, and set flags on the ubc_info to indicate that
1832 * we have done this
1833 *
1834 * Parameters: vp The vnode to map
1835 * flags The mapping flags for the vnode; this
1836 * will be a combination of one or more of
1837 * PROT_READ, PROT_WRITE, and PROT_EXEC
1838 *
1839 * Returns: 0 Success
1840 * EPERM Permission was denied
1841 *
1842 * Notes: An I/O reference on the vnode must already be held on entry
1843 *
1844 * If there is no ubc_info associated with the vnode, this function
1845 * will return success.
1846 *
1847 * If a permission error occurs, this function will return
1848 * failure; all other failures will cause this function to return
1849 * success.
1850 *
1851 * IMPORTANT: This is an internal use function, and its symbols
1852 * are not exported, hence its error checking is not very robust.
1853 * It is primarily used by:
1854 *
1855 * o mmap(), when mapping a file
2d21ac55
A
1856 * o When mapping a shared file (a shared library in the
1857 * shared segment region)
1858 * o When loading a program image during the exec process
1859 *
1860 * ...all of these uses ignore the return code, and any fault that
1861 * results later because of a failure is handled in the fix-up path
1862 * of the fault handler. The interface exists primarily as a
1863 * performance hint.
1864 *
1865 * Given that third party implementations of the types of interfaces
1866 * that would use this function, such as alternative executable
1867 * formats, etc., are unsupported, this function is not exported
1868 * for general use.
1869 *
1870 * The extra reference is held until the VM system unmaps the
1871 * vnode from its own context to maintain a vnode reference in
1872 * cases like open()/mmap()/close(), which leave the backing
1873 * object referenced by a mapped memory region in a process
1874 * address space.
1c79356b 1875 */
91447636
A
1876__private_extern__ int
1877ubc_map(vnode_t vp, int flags)
1c79356b
A
1878{
1879 struct ubc_info *uip;
91447636
A
1880 int error = 0;
1881 int need_ref = 0;
2d21ac55 1882 int need_wakeup = 0;
1c79356b 1883
91447636 1884 if (UBCINFOEXISTS(vp)) {
2d21ac55
A
1885 vnode_lock(vp);
1886 uip = vp->v_ubcinfo;
1887
1888 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
1889 SET(uip->ui_flags, UI_MAPWAITING);
1890 (void) msleep(&uip->ui_flags, &vp->v_lock,
0a7de745 1891 PRIBIO, "ubc_map", NULL);
2d21ac55
A
1892 }
1893 SET(uip->ui_flags, UI_MAPBUSY);
1894 vnode_unlock(vp);
1895
1896 error = VNOP_MMAP(vp, flags, vfs_context_current());
1c79356b 1897
39037602
A
1898 /*
1899 * rdar://problem/22587101 required that we stop propagating
0a7de745 1900 * EPERM up the stack. Otherwise, we would have to funnel up
39037602 1901 * the error at all the call sites for memory_object_map().
0a7de745 1902 * The risk is in having to undo the map/object/entry state at
39037602
A
1903 * all these call sites. It would also affect more than just mmap()
1904 * e.g. vm_remap().
1905 *
1906 * if (error != EPERM)
0a7de745 1907 * error = 0;
39037602
A
1908 */
1909
1910 error = 0;
1c79356b 1911
2d21ac55 1912 vnode_lock_spin(vp);
1c79356b 1913
2d21ac55 1914 if (error == 0) {
0a7de745
A
1915 if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
1916 need_ref = 1;
1917 }
91447636 1918 SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
22ba694c
A
1919 if (flags & PROT_WRITE) {
1920 SET(uip->ui_flags, UI_MAPPEDWRITE);
1921 }
2d21ac55
A
1922 }
1923 CLR(uip->ui_flags, UI_MAPBUSY);
55e303ae 1924
2d21ac55
A
1925 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
1926 CLR(uip->ui_flags, UI_MAPWAITING);
1927 need_wakeup = 1;
55e303ae 1928 }
2d21ac55 1929 vnode_unlock(vp);
b4c24cb9 1930
0a7de745 1931 if (need_wakeup) {
2d21ac55 1932 wakeup(&uip->ui_flags);
0a7de745 1933 }
2d21ac55 1934
39037602
A
1935 if (need_ref) {
1936 /*
1937 * Make sure we get a ref as we can't unwind from here
1938 */
0a7de745 1939 if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
39037602 1940 panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__);
0a7de745 1941 }
cb323159
A
1942 /*
1943 * Vnodes that are on "unreliable" media (like disk
1944 * images, network filesystems, 3rd-party filesystems,
1945 * and possibly external devices) could see their
1946 * contents be changed via the backing store without
1947 * triggering copy-on-write, so we can't fully rely
1948 * on copy-on-write and might have to resort to
1949 * copy-on-read to protect "privileged" processes and
1950 * prevent privilege escalation.
1951 *
1952 * The root filesystem is considered "reliable" because
1953 * there's not much point in trying to protect
1954 * ourselves from such a vulnerability and the extra
1955 * cost of copy-on-read (CPU time and memory pressure)
1956 * could result in some serious regressions.
1957 */
1958 if (vp->v_mount != NULL &&
1959 ((vp->v_mount->mnt_flag & MNT_ROOTFS) ||
1960 vnode_on_reliable_media(vp))) {
1961 /*
1962 * This vnode is deemed "reliable" so mark
1963 * its VM object as "trusted".
1964 */
1965 memory_object_mark_trusted(uip->ui_control);
1966 } else {
1967// printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
1968 }
39037602 1969 }
2d21ac55 1970 }
0a7de745 1971 return error;
0b4e3aa0
A
1972}
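
/*
 * Illustrative sketch (not part of the original source): callers such as
 * the mmap() and exec paths invoke ubc_map() purely as a performance hint
 * and ignore the return value; any failure simply surfaces later as a
 * fault that the fault handler resolves.  A hypothetical caller mapping a
 * file for execution would look like this; an I/O reference on the vnode
 * must already be held.
 */
__unused static void
example_map_hint(vnode_t vp)
{
	(void) ubc_map(vp, PROT_READ | PROT_EXEC);
}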
1973
2d21ac55 1974
0b4e3aa0 1975/*
2d21ac55
A
1976 * ubc_destroy_named
1977 *
1978 * Destroy the named memory object associated with the ubc_info control object
1979 * associated with the designated vnode, if there is a ubc_info associated
1980 * with the vnode, and a control object is associated with it
1981 *
1982 * Parameters: vp The designated vnode
1983 *
1984 * Returns: (void)
1985 *
1986 * Notes: This function is called on vnode termination for all vnodes,
1987 * and must therefore not assume that there is a ubc_info that is
1988 * associated with the vnode, nor that there is a control object
1989 * associated with the ubc_info.
1990 *
1991 * If all the conditions necessary are present, this function
1992 * calls memory_object_destroy(), which will in turn end up
1993 * calling ubc_unmap() to release any vnode references that were
1994 * established via ubc_map().
1995 *
1996 * IMPORTANT: This is an internal use function that is used
1997 * exclusively by the internal use function vclean().
0b4e3aa0 1998 */
2d21ac55
A
1999__private_extern__ void
2000ubc_destroy_named(vnode_t vp)
0b4e3aa0
A
2001{
2002 memory_object_control_t control;
0b4e3aa0
A
2003 struct ubc_info *uip;
2004 kern_return_t kret;
2005
2d21ac55 2006 if (UBCINFOEXISTS(vp)) {
0a7de745 2007 uip = vp->v_ubcinfo;
2d21ac55
A
2008
2009 /* Terminate the memory object */
2010 control = ubc_getobject(vp, UBC_HOLDOBJECT);
2011 if (control != MEMORY_OBJECT_CONTROL_NULL) {
0a7de745
A
2012 kret = memory_object_destroy(control, 0);
2013 if (kret != KERN_SUCCESS) {
2014 panic("ubc_destroy_named: memory_object_destroy failed");
2015 }
0b4e3aa0
A
2016 }
2017 }
1c79356b
A
2018}
2019
0b4e3aa0 2020
1c79356b 2021/*
2d21ac55
A
2022 * ubc_isinuse
2023 *
2024 * Determine whether or not a vnode is currently in use by ubc at a level in
2025 * excess of the requested busycount
2026 *
2027 * Parameters: vp The vnode to check
2028 * busycount The threshold busy count, used to bias
2029 * the count usually already held by the
2030 * caller to avoid races
2031 *
2032 * Returns: 1 The vnode is in use over the threshold
2033 * 0 The vnode is not in use over the
2034 * threshold
2035 *
2036 * Notes: Because the vnode is only held locked while actually querying
2037 * the use count, this function only represents a snapshot of the
2038 * current state of the vnode. If more accurate information is
2039 * required, an additional busycount should be held by the caller
2040 * and a non-zero busycount used.
2041 *
2042 * If there is no ubc_info associated with the vnode, this
2043 * function will report that the vnode is not in use by ubc.
1c79356b
A
2044 */
2045int
91447636 2046ubc_isinuse(struct vnode *vp, int busycount)
1c79356b 2047{
0a7de745
A
2048 if (!UBCINFOEXISTS(vp)) {
2049 return 0;
2050 }
2051 return ubc_isinuse_locked(vp, busycount, 0);
1c79356b
A
2052}
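
/*
 * Illustrative sketch (not part of the original source): a filesystem
 * deciding whether it can safely switch a file to uncached (direct) I/O
 * can ask whether anyone besides the current opener is using the vnode.
 * The busycount of 1 biases away the use count the caller itself holds.
 */
__unused static boolean_t
example_vnode_is_shared(vnode_t vp)
{
	return ubc_isinuse(vp, 1) ? TRUE : FALSE;
}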
2053
91447636 2054
2d21ac55
A
2055/*
2056 * ubc_isinuse_locked
2057 *
2058 * Determine whether or not a vnode is currently in use by ubc at a level in
2059 * excess of the requested busycount
2060 *
2061 * Parameters: vp The vnode to check
2062 * busycount The threshold busy count, used to bias
2063 * the count usually already held by the
2064 * caller to avoid races
2065 * locked True if the vnode is already locked by
2066 * the caller
2067 *
2068 * Returns: 1 The vnode is in use over the threshold
2069 * 0 The vnode is not in use over the
2070 * threshold
2071 *
2072 * Notes: If the vnode is not locked on entry, it is locked while
2073 * actually querying the use count. If this is the case, this
2074 * function only represents a snapshot of the current state of
2075 * the vnode. If more accurate information is required, the
2076 * vnode lock should be held by the caller, otherwise an
2077 * additional busycount should be held by the caller and a
2078 * non-zero busycount used.
2079 *
2080 * If there is no ubc_info associated with the vnode, this
2081 * function will report that the vnode is not in use by ubc.
2082 */
1c79356b 2083int
91447636 2084ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
1c79356b 2085{
91447636 2086 int retval = 0;
1c79356b 2087
9bccf70c 2088
0a7de745 2089 if (!locked) {
b0d623f7 2090 vnode_lock_spin(vp);
0a7de745 2091 }
1c79356b 2092
0a7de745 2093 if ((vp->v_usecount - vp->v_kusecount) > busycount) {
91447636 2094 retval = 1;
0a7de745 2095 }
91447636 2096
0a7de745 2097 if (!locked) {
91447636 2098 vnode_unlock(vp);
0a7de745
A
2099 }
2100 return retval;
1c79356b
A
2101}
2102
91447636 2103
1c79356b 2104/*
2d21ac55
A
2105 * ubc_unmap
2106 *
2107 * Reverse the effects of a ubc_map() call for a given vnode
2108 *
2109 * Parameters: vp vnode to unmap from ubc
2110 *
2111 * Returns: (void)
2112 *
2113 * Notes: This is an internal use function used by vnode_pager_unmap().
2114 * It will attempt to obtain a reference on the supplied vnode,
2115 * and if it can do so, and there is an associated ubc_info, and
2116 * the flags indicate that it was mapped via ubc_map(), then the
2117 * flag is cleared, the mapping removed, and the reference taken
2118 * by ubc_map() is released.
2119 *
2120 * IMPORTANT: This MUST only be called by the VM
2121 * to prevent race conditions.
1c79356b 2122 */
0b4e3aa0 2123__private_extern__ void
1c79356b
A
2124ubc_unmap(struct vnode *vp)
2125{
2126 struct ubc_info *uip;
0a7de745
A
2127 int need_rele = 0;
2128 int need_wakeup = 0;
b0d623f7 2129
0a7de745
A
2130 if (vnode_getwithref(vp)) {
2131 return;
2132 }
1c79356b 2133
91447636 2134 if (UBCINFOEXISTS(vp)) {
fe8ab488
A
2135 bool want_fsevent = false;
2136
91447636 2137 vnode_lock(vp);
91447636 2138 uip = vp->v_ubcinfo;
2d21ac55
A
2139
2140 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
2141 SET(uip->ui_flags, UI_MAPWAITING);
2142 (void) msleep(&uip->ui_flags, &vp->v_lock,
0a7de745 2143 PRIBIO, "ubc_unmap", NULL);
2d21ac55
A
2144 }
2145 SET(uip->ui_flags, UI_MAPBUSY);
2146
91447636 2147 if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
0a7de745 2148 if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
fe8ab488 2149 want_fsevent = true;
0a7de745 2150 }
fe8ab488 2151
91447636 2152 need_rele = 1;
fe8ab488
A
2153
2154 /*
2155 * We want to clear the mapped flags after we've called
2156 * VNOP_MNOMAP to avoid certain races and allow
2157 * VNOP_MNOMAP to call ubc_is_mapped_writable.
2158 */
91447636
A
2159 }
2160 vnode_unlock(vp);
fe8ab488 2161
91447636 2162 if (need_rele) {
0a7de745 2163 vfs_context_t ctx = vfs_context_current();
fe8ab488 2164
0a7de745 2165 (void)VNOP_MNOMAP(vp, ctx);
fe8ab488
A
2166
2167#if CONFIG_FSE
0a7de745
A
2168 /*
2169 * Why do we want an fsevent here? Normally the
2170 * content modified fsevent is posted when a file is
2171 * closed and only if it's written to via conventional
2172 * means. It's perfectly legal to close a file and
2173 * keep your mappings and we don't currently track
2174 * whether it was written to via a mapping.
2175 * Therefore, we need to post an fsevent here if the
2176 * file was mapped writable. This may result in false
2177 * events, i.e. we post a notification when nothing
2178 * has really changed.
2179 */
2180 if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
2181 add_fsevent(FSE_CONTENT_MODIFIED, ctx,
2182 FSE_ARG_VNODE, vp,
2183 FSE_ARG_DONE);
2184 }
fe8ab488
A
2185#endif
2186
0a7de745 2187 vnode_rele(vp);
91447636 2188 }
2d21ac55
A
2189
2190 vnode_lock_spin(vp);
2191
0a7de745 2192 if (need_rele) {
fe8ab488 2193 CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
0a7de745 2194 }
fe8ab488 2195
2d21ac55 2196 CLR(uip->ui_flags, UI_MAPBUSY);
fe8ab488 2197
2d21ac55
A
2198 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2199 CLR(uip->ui_flags, UI_MAPWAITING);
2200 need_wakeup = 1;
2201 }
2202 vnode_unlock(vp);
2203
0a7de745
A
2204 if (need_wakeup) {
2205 wakeup(&uip->ui_flags);
2206 }
91447636
A
2207 }
2208 /*
2209 * the drop of the vnode ref will cleanup
2210 */
2211 vnode_put(vp);
0b4e3aa0
A
2212}
2213
2d21ac55
A
2214
2215/*
2216 * ubc_page_op
2217 *
2218 * Manipulate individual page state for a vnode with an associated ubc_info
2219 * with an associated memory object control.
2220 *
2221 * Parameters: vp The vnode backing the page
2222 * f_offset A file offset interior to the page
2223 * ops The operations to perform, as a bitmap
2224 * (see below for more information)
2225 * phys_entryp The address of a ppnum_t; may be NULL
2226 * to ignore
2227 * flagsp A pointer to an int to contain flags;
2228 * may be NULL to ignore
2229 *
2230 * Returns: KERN_SUCCESS Success
2231 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2232 * object associated
2233 * KERN_INVALID_OBJECT If UPL_POP_PHYSICAL and the object is
2234 * not physically contiguous
2235 * KERN_INVALID_OBJECT If !UPL_POP_PHYSICAL and the object is
2236 * physically contiguous
2237 * KERN_FAILURE If the page cannot be looked up
2238 *
2239 * Implicit Returns:
2240 * *phys_entryp (modified) If phys_entryp is non-NULL and
2241 * UPL_POP_PHYSICAL
2242 * *flagsp (modified) If flagsp is non-NULL and there was
2243 * !UPL_POP_PHYSICAL and a KERN_SUCCESS
2244 *
2245 * Notes: For object boundaries, it is considerably more efficient to
2246 * ensure that f_offset is in fact on a page boundary, as this
2247 * will avoid internal use of the hash table to identify the
2248 * page, and would therefore skip a number of early optimizations.
2249 * Since this is a page operation anyway, the caller should try
2250 * to pass only a page aligned offset because of this.
2251 *
2252 * *flagsp may be modified even if this function fails. If it is
2253 * modified, it will contain the condition of the page before the
2254 * requested operation was attempted; these will only include the
2255 * bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
2256 * UPL_POP_SET, or UPL_POP_CLR bits.
2257 *
2258 * The flags field may contain a specific operation, such as
2259 * UPL_POP_PHYSICAL or UPL_POP_DUMP:
2260 *
2261 * o UPL_POP_PHYSICAL Fail if not contiguous; if
2262 * *phys_entryp and successful, set
2263 * *phys_entryp
2264 * o UPL_POP_DUMP Dump the specified page
2265 *
2266 * Otherwise, it is treated as a bitmap of one or more page
2267 * operations to perform on the final memory object; allowable
2268 * bit values are:
2269 *
2270 * o UPL_POP_DIRTY The page is dirty
2271 * o UPL_POP_PAGEOUT The page is paged out
2272 * o UPL_POP_PRECIOUS The page is precious
2273 * o UPL_POP_ABSENT The page is absent
2274 * o UPL_POP_BUSY The page is busy
2275 *
2276 * If the page status is only being queried and not modified, then
2277 * no other bits should be specified. However, if it is being
2278 * modified, exactly ONE of the following bits should be set:
2279 *
2280 * o UPL_POP_SET Set the current bitmap bits
2281 * o UPL_POP_CLR Clear the current bitmap bits
2282 *
2283 * Thus to effect a combination of setting and clearing, it may be
2284 * necessary to call this function twice. If this is done, the
2285 * set should be used before the clear, since clearing may trigger
2286 * a wakeup on the destination page, and if the page is backed by
2287 * an encrypted swap file, setting will trigger the decryption
2288 * needed before the wakeup occurs.
2289 */
0b4e3aa0
A
2290kern_return_t
2291ubc_page_op(
0a7de745
A
2292 struct vnode *vp,
2293 off_t f_offset,
2294 int ops,
2295 ppnum_t *phys_entryp,
2296 int *flagsp)
0b4e3aa0 2297{
0a7de745 2298 memory_object_control_t control;
0b4e3aa0
A
2299
2300 control = ubc_getobject(vp, UBC_FLAGS_NONE);
0a7de745 2301 if (control == MEMORY_OBJECT_CONTROL_NULL) {
0b4e3aa0 2302 return KERN_INVALID_ARGUMENT;
0a7de745 2303 }
0b4e3aa0 2304
0a7de745
A
2305 return memory_object_page_op(control,
2306 (memory_object_offset_t)f_offset,
2307 ops,
2308 phys_entryp,
2309 flagsp);
0b4e3aa0 2310}
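
/*
 * Illustrative sketch (not part of the original source): query the state
 * of the page containing a given file offset without modifying it, by
 * passing an empty ops bitmap (neither UPL_POP_SET nor UPL_POP_CLR) and
 * reading the returned flags.  The offset is page aligned first, per the
 * note above, to skip the internal hash lookup.
 */
__unused static boolean_t
example_page_is_dirty(vnode_t vp, off_t f_offset)
{
	int flags = 0;

	if (ubc_page_op(vp, trunc_page_64(f_offset), 0, NULL, &flags) != KERN_SUCCESS) {
		return FALSE;	/* the page could not be looked up */
	}
	return (flags & UPL_POP_DIRTY) ? TRUE : FALSE;
}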
2d21ac55
A
2311
2312
2313/*
2314 * ubc_range_op
2315 *
2316 * Manipulate page state for a range of memory for a vnode with an associated
2317 * ubc_info with an associated memory object control, when page level state is
2318 * not required to be returned from the call (i.e. there are no phys_entryp or
2319 * flagsp parameters to this call, and it takes a range which may contain
2320 * multiple pages, rather than an offset interior to a single page).
2321 *
2322 * Parameters: vp The vnode backing the page
2323 * f_offset_beg A file offset interior to the start page
2324 * f_offset_end A file offset interior to the end page
2325 * ops The operations to perform, as a bitmap
2326 * (see below for more information)
2327 * range The address of an int; may be NULL to
2328 * ignore
2329 *
2330 * Returns: KERN_SUCCESS Success
2331 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2332 * object associated
2333 * KERN_INVALID_OBJECT If the object is physically contiguous
2334 *
2335 * Implicit Returns:
2336 * *range (modified) If range is non-NULL, its contents will
2337 * be modified to contain the number of
2338 * bytes successfully operated upon.
2339 *
2340 * Notes: IMPORTANT: This function cannot be used on a range that
2341 * consists of physically contiguous pages.
2342 *
2343 * For object boundaries, it is considerably more efficient to
2344 * ensure that f_offset_beg and f_offset_end are in fact on page
2345 * boundaries, as this will avoid internal use of the hash table
2346 * to identify the page, and would therefore skip a number of
2347 * early optimizations. Since this is an operation on a set of
2348 * pages anyway, the caller should try to pass only page aligned
2349 * offsets because of this.
2350 *
2351 * *range will be modified only if this function succeeds.
2352 *
2353 * The flags field MUST contain a specific operation; allowable
2354 * values are:
2355 *
2356 * o UPL_ROP_ABSENT Returns the extent of the range
2357 * presented which is absent, starting
2358 * with the start address presented
2359 *
2360 * o UPL_ROP_PRESENT Returns the extent of the range
2361 * presented which is present (resident),
2362 * starting with the start address
2363 * presented
2364 * o UPL_ROP_DUMP Dump the pages which are found in the
2365 * target object for the target range.
2366 *
2367 * IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2368 * multiple regions in the range, only the first matching region
2369 * is returned.
2370 */
55e303ae
A
2371kern_return_t
2372ubc_range_op(
0a7de745
A
2373 struct vnode *vp,
2374 off_t f_offset_beg,
2375 off_t f_offset_end,
55e303ae
A
2376 int ops,
2377 int *range)
2378{
0a7de745 2379 memory_object_control_t control;
55e303ae
A
2380
2381 control = ubc_getobject(vp, UBC_FLAGS_NONE);
0a7de745 2382 if (control == MEMORY_OBJECT_CONTROL_NULL) {
55e303ae 2383 return KERN_INVALID_ARGUMENT;
0a7de745 2384 }
55e303ae 2385
0a7de745
A
2386 return memory_object_range_op(control,
2387 (memory_object_offset_t)f_offset_beg,
2388 (memory_object_offset_t)f_offset_end,
2389 ops,
2390 range);
55e303ae 2391}
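
/*
 * Illustrative sketch (not part of the original source): determine how
 * much of a range, starting at its beginning, is already resident.  Since
 * UPL_ROP_PRESENT reports only the first matching region, *resident
 * describes the leading contiguous resident extent, in bytes.
 */
__unused static kern_return_t
example_resident_extent(vnode_t vp, off_t start, off_t end, int *resident)
{
	return ubc_range_op(vp, start, end, UPL_ROP_PRESENT, resident);
}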
2d21ac55
A
2392
2393
2394/*
2395 * ubc_create_upl
2396 *
2397 * Given a vnode, cause the population of a portion of the vm_object; based on
2398 * the nature of the request, the pages returned may contain valid data, or
2399 * they may be uninitialized.
2400 *
2401 * Parameters: vp The vnode from which to create the upl
2402 * f_offset The start offset into the backing store
2403 * represented by the vnode
2404 * bufsize The size of the upl to create
2405 * uplp Pointer to the upl_t to receive the
2406 * created upl; MUST NOT be NULL
2407 * plp Pointer to receive the internal page
2408 * list for the created upl; MAY be NULL
2409 * to ignore
2410 *
2411 * Returns: KERN_SUCCESS The requested upl has been created
2412 * KERN_INVALID_ARGUMENT The bufsize argument is not an even
2413 * multiple of the page size
2414 * KERN_INVALID_ARGUMENT There is no ubc_info associated with
2415 * the vnode, or there is no memory object
0a7de745 2416 * control associated with the ubc_info
2d21ac55
A
2417 * memory_object_upl_request:KERN_INVALID_VALUE
2418 * The supplied upl_flags argument is
2419 * invalid
2420 * Implicit Returns:
0a7de745 2421 * *uplp (modified)
2d21ac55
A
2422 * *plp (modified) If non-NULL, the value of *plp will be
2423 * modified to point to the internal page
2424 * list; this modification may occur even
2425 * if this function is unsuccessful, in
2426 * which case the contents may be invalid
2427 *
2428 * Note: If successful, the returned *uplp MUST subsequently be freed
2429 * via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2430 * ubc_upl_abort(), or ubc_upl_abort_range().
2431 */
0b4e3aa0 2432kern_return_t
5ba3f43e 2433ubc_create_upl_external(
0a7de745
A
2434 struct vnode *vp,
2435 off_t f_offset,
2436 int bufsize,
2437 upl_t *uplp,
2438 upl_page_info_t **plp,
2439 int uplflags)
5ba3f43e 2440{
0a7de745 2441 return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
5ba3f43e
A
2442}
2443
2444kern_return_t
2445ubc_create_upl_kernel(
0a7de745
A
2446 struct vnode *vp,
2447 off_t f_offset,
2448 int bufsize,
2449 upl_t *uplp,
2450 upl_page_info_t **plp,
2451 int uplflags,
5ba3f43e 2452 vm_tag_t tag)
0b4e3aa0 2453{
0a7de745
A
2454 memory_object_control_t control;
2455 kern_return_t kr;
b0d623f7 2456
0a7de745 2457 if (plp != NULL) {
b0d623f7 2458 *plp = NULL;
0a7de745 2459 }
b0d623f7 2460 *uplp = NULL;
0a7de745
A
2461
2462 if (bufsize & 0xfff) {
0b4e3aa0 2463 return KERN_INVALID_ARGUMENT;
0a7de745 2464 }
0b4e3aa0 2465
0a7de745 2466 if (bufsize > MAX_UPL_SIZE_BYTES) {
6d2010ae 2467 return KERN_INVALID_ARGUMENT;
0a7de745 2468 }
6d2010ae 2469
b0d623f7 2470 if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
b0d623f7
A
2471 if (uplflags & UPL_UBC_MSYNC) {
2472 uplflags &= UPL_RET_ONLY_DIRTY;
2473
2474 uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
0a7de745 2475 UPL_SET_INTERNAL | UPL_SET_LITE;
b0d623f7
A
2476 } else if (uplflags & UPL_UBC_PAGEOUT) {
2477 uplflags &= UPL_RET_ONLY_DIRTY;
2478
0a7de745 2479 if (uplflags & UPL_RET_ONLY_DIRTY) {
b0d623f7 2480 uplflags |= UPL_NOBLOCK;
0a7de745 2481 }
b0d623f7
A
2482
2483 uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
0a7de745 2484 UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
b0d623f7 2485 } else {
316670eb 2486 uplflags |= UPL_RET_ONLY_ABSENT |
0a7de745
A
2487 UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
2488 UPL_SET_INTERNAL | UPL_SET_LITE;
316670eb
A
2489
2490 /*
2491 * if the requested size == PAGE_SIZE, we don't want to set
2492 * the UPL_NOBLOCK since we may be trying to recover from a
2493 * previous partial pagein I/O that occurred because we were low
2494 * on memory and bailed early in order to honor the UPL_NOBLOCK...
2495 * since we're only asking for a single page, we can block w/o fear
2496 * of tying up pages while waiting for more to become available
2497 */
0a7de745 2498 if (bufsize > PAGE_SIZE) {
316670eb 2499 uplflags |= UPL_NOBLOCK;
0a7de745 2500 }
b0d623f7
A
2501 }
2502 } else {
55e303ae 2503 uplflags &= ~UPL_FOR_PAGEOUT;
55e303ae 2504
b0d623f7
A
2505 if (uplflags & UPL_WILL_BE_DUMPED) {
2506 uplflags &= ~UPL_WILL_BE_DUMPED;
0a7de745
A
2507 uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
2508 } else {
2509 uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
2510 }
b0d623f7
A
2511 }
2512 control = ubc_getobject(vp, UBC_FLAGS_NONE);
0a7de745 2513 if (control == MEMORY_OBJECT_CONTROL_NULL) {
0b4e3aa0 2514 return KERN_INVALID_ARGUMENT;
0a7de745 2515 }
0b4e3aa0 2516
5ba3f43e 2517 kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
0a7de745 2518 if (kr == KERN_SUCCESS && plp != NULL) {
b0d623f7 2519 *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
0a7de745 2520 }
0b4e3aa0
A
2521 return kr;
2522}
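
/*
 * Illustrative sketch (not part of the original source): the typical life
 * cycle of a upl created through this interface, assuming f_offset is page
 * aligned.  A pagein-style upl is created for a single page, mapped into
 * the kernel, used, unmapped and committed; UPL_COMMIT_FREE_ON_EMPTY (and
 * UPL_ABORT_FREE_ON_EMPTY on the error path) deallocates the upl once the
 * whole range has been committed or aborted.
 */
__unused static kern_return_t
example_upl_lifecycle(vnode_t vp, off_t f_offset)
{
	upl_t		upl;
	upl_page_info_t	*pl;
	vm_offset_t	kaddr;
	kern_return_t	kr;

	kr = ubc_create_upl_kernel(vp, f_offset, PAGE_SIZE, &upl, &pl,
	    UPL_UBC_PAGEIN, vm_tag_bt());
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	kr = ubc_upl_map(upl, &kaddr);
	if (kr != KERN_SUCCESS) {
		(void) ubc_upl_abort_range(upl, 0, PAGE_SIZE,
		    UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
		return kr;
	}

	/* ... fill or inspect the page at kaddr ... */

	(void) ubc_upl_unmap(upl);
	return ubc_upl_commit_range(upl, 0, PAGE_SIZE,
	    UPL_COMMIT_FREE_ON_EMPTY);
}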
0a7de745
A
2523
2524
2d21ac55
A
2525/*
2526 * ubc_upl_maxbufsize
2527 *
2528 * Return the maximum bufsize ubc_create_upl( ) will take.
2529 *
2530 * Parameters: none
2531 *
2532 * Returns: maximum size buffer (in bytes) ubc_create_upl( ) will take.
2533 */
0a7de745 2534upl_size_t
2d21ac55
A
2535ubc_upl_maxbufsize(
2536 void)
2537{
0a7de745 2538 return MAX_UPL_SIZE_BYTES;
2d21ac55 2539}
0b4e3aa0 2540
2d21ac55
A
2541/*
2542 * ubc_upl_map
2543 *
2544 * Map the page list associated with the supplied upl into the kernel virtual
2545 * address space at the virtual address indicated by the dst_addr argument;
2546 * the entire upl is mapped
2547 *
2548 * Parameters: upl The upl to map
2549 * dst_addr The address at which to map the upl
2550 *
2551 * Returns: KERN_SUCCESS The upl has been mapped
2552 * KERN_INVALID_ARGUMENT The upl is UPL_NULL
2553 * KERN_FAILURE The upl is already mapped
2554 * vm_map_enter:KERN_INVALID_ARGUMENT
2555 * A failure code from vm_map_enter() due
2556 * to an invalid argument
2557 */
0b4e3aa0
A
2558kern_return_t
2559ubc_upl_map(
0a7de745
A
2560 upl_t upl,
2561 vm_offset_t *dst_addr)
0b4e3aa0 2562{
0a7de745 2563 return vm_upl_map(kernel_map, upl, dst_addr);
0b4e3aa0
A
2564}
2565
2566
2d21ac55
A
2567/*
2568 * ubc_upl_unmap
2569 *
2570 * Unmap the page list associated with the supplied upl from the kernel virtual
2571 * address space; the entire upl is unmapped.
2572 *
2573 * Parameters: upl The upl to unmap
2574 *
2575 * Returns: KERN_SUCCESS The upl has been unmapped
2576 * KERN_FAILURE The upl is not currently mapped
2577 * KERN_INVALID_ARGUMENT If the upl is UPL_NULL
2578 */
0b4e3aa0
A
2579kern_return_t
2580ubc_upl_unmap(
0a7de745 2581 upl_t upl)
0b4e3aa0 2582{
0a7de745 2583 return vm_upl_unmap(kernel_map, upl);
0b4e3aa0
A
2584}
2585
2d21ac55
A
2586
2587/*
2588 * ubc_upl_commit
2589 *
2590 * Commit the contents of the upl to the backing store
2591 *
2592 * Parameters: upl The upl to commit
2593 *
2594 * Returns: KERN_SUCCESS The upl has been committed
2595 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2596 * KERN_FAILURE The supplied upl does not represent
2597 * device memory, and the offset plus the
2598 * size would exceed the actual size of
2599 * the upl
2600 *
2601 * Notes: In practice, the only return value for this function should be
2602 * KERN_SUCCESS, unless there has been data structure corruption;
2603 * since the upl is deallocated regardless of success or failure,
2604 * there's really nothing to do about this other than panic.
2605 *
2606 * IMPORTANT: Use of this function should not be mixed with use of
2607 * ubc_upl_commit_range(), due to the unconditional deallocation
2608 * by this function.
2609 */
0b4e3aa0
A
2610kern_return_t
2611ubc_upl_commit(
0a7de745 2612 upl_t upl)
0b4e3aa0 2613{
0a7de745
A
2614 upl_page_info_t *pl;
2615 kern_return_t kr;
0b4e3aa0
A
2616
2617 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
fe8ab488 2618 kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
0b4e3aa0
A
2619 upl_deallocate(upl);
2620 return kr;
1c79356b
A
2621}
2622
0b4e3aa0 2623
2d21ac55
A
2624/*
2625 * ubc_upl_commit_range
2626 *
2627 * Commit the contents of the specified range of the upl to the backing store
2628 *
2629 * Parameters: upl The upl to commit
2630 * offset The offset into the upl
2631 * size The size of the region to be committed,
2632 * starting at the specified offset
2633 * flags commit type (see below)
2634 *
2635 * Returns: KERN_SUCCESS The range has been committed
2636 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2637 * KERN_FAILURE The supplied upl does not represent
2638 * device memory, and the offset plus the
2639 * size would exceed the actual size of
2640 * the upl
2641 *
2642 * Notes: IMPORTANT: If the commit is successful, and the object is now
2643 * empty, the upl will be deallocated. Since the caller cannot
2644 * check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2645 * should generally only be used when the offset is 0 and the size
2646 * is equal to the upl size.
2647 *
2648 * The flags argument is a bitmap of flags on the range of pages in
2649 * the upl to be committed; allowable flags are:
2650 *
2651 * o UPL_COMMIT_FREE_ON_EMPTY Free the upl when it is
2652 * both empty and has been
2653 * successfully committed
2654 * o UPL_COMMIT_CLEAR_DIRTY Clear each page's dirty
2655 * bit; will prevent a
2656 * later pageout
2657 * o UPL_COMMIT_SET_DIRTY Set each page's dirty
2658 * bit; will cause a later
2659 * pageout
2660 * o UPL_COMMIT_INACTIVATE Clear each page's
2661 * reference bit; the page
2662 * will not be accessed
2663 * o UPL_COMMIT_ALLOW_ACCESS Unbusy each page; pages
2664 * become busy when an
2665 * IOMemoryDescriptor is
2666 * mapped or redirected,
2667 * and we have to wait for
2668 * an IOKit driver
2669 *
2670 * The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2671 * not be specified by the caller.
2672 *
2673 * The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2674 * mutually exclusive, and should not be combined.
2675 */
0b4e3aa0
A
2676kern_return_t
2677ubc_upl_commit_range(
0a7de745
A
2678 upl_t upl,
2679 upl_offset_t offset,
2680 upl_size_t size,
2681 int flags)
0b4e3aa0 2682{
0a7de745
A
2683 upl_page_info_t *pl;
2684 boolean_t empty;
2685 kern_return_t kr;
0b4e3aa0 2686
0a7de745 2687 if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
0b4e3aa0 2688 flags |= UPL_COMMIT_NOTIFY_EMPTY;
0a7de745 2689 }
0b4e3aa0 2690
593a1d5f
A
2691 if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2692 return KERN_INVALID_ARGUMENT;
2693 }
2694
0b4e3aa0
A
2695 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2696
2697 kr = upl_commit_range(upl, offset, size, flags,
0a7de745 2698 pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
0b4e3aa0 2699
0a7de745 2700 if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
0b4e3aa0 2701 upl_deallocate(upl);
0a7de745 2702 }
0b4e3aa0
A
2703
2704 return kr;
2705}
2d21ac55
A
2706
2707
2708/*
2709 * ubc_upl_abort_range
2710 *
2711 * Abort the contents of the specified range of the specified upl
2712 *
2713 * Parameters: upl The upl to abort
2714 * offset The offset into the upl
2715 * size The size of the region to be aborted,
2716 * starting at the specified offset
2717 * abort_flags abort type (see below)
2718 *
2719 * Returns: KERN_SUCCESS The range has been aborted
2720 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2721 * KERN_FAILURE The supplied upl does not represent
2722 * device memory, and the offset plus the
2723 * size would exceed the actual size of
2724 * the upl
2725 *
2726 * Notes: IMPORTANT: If the abort is successful, and the object is now
2727 * empty, the upl will be deallocated. Since the caller cannot
2728 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2729 * should generally only be used when the offset is 0 and the size
2730 * is equal to the upl size.
2731 *
2732 * The abort_flags argument is a bitmap of flags on the range of
2733 * pages in the upl to be aborted; allowable flags are:
2734 *
2735 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2736 * empty and has been successfully
2737 * aborted
2738 * o UPL_ABORT_RESTART The operation must be restarted
2739 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2740 * o UPL_ABORT_ERROR An I/O error occurred
2741 * o UPL_ABORT_DUMP_PAGES Just free the pages
2742 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2743 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2744 *
2745 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2746 * not be specified by the caller. It is intended to fulfill the
2747 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2748 * ubc_upl_commit_range(), but is never referenced internally.
2749 *
2750 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2751 * referenced; do not use it.
2752 */
0b4e3aa0
A
2753kern_return_t
2754ubc_upl_abort_range(
0a7de745
A
2755 upl_t upl,
2756 upl_offset_t offset,
2757 upl_size_t size,
2758 int abort_flags)
0b4e3aa0 2759{
0a7de745
A
2760 kern_return_t kr;
2761 boolean_t empty = FALSE;
0b4e3aa0 2762
0a7de745 2763 if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
0b4e3aa0 2764 abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
0a7de745 2765 }
0b4e3aa0
A
2766
2767 kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2768
0a7de745 2769 if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
0b4e3aa0 2770 upl_deallocate(upl);
0a7de745 2771 }
0b4e3aa0
A
2772
2773 return kr;
2774}
2775
2d21ac55
A
2776
2777/*
2778 * ubc_upl_abort
2779 *
2780 * Abort the contents of the specified upl
2781 *
2782 * Parameters: upl The upl to abort
2783 * abort_type abort type (see below)
2784 *
2785 * Returns: KERN_SUCCESS The range has been aborted
2786 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2787 * KERN_FAILURE The supplied upl does not represent
2788 * device memory, and the offset plus the
2789 * size would exceed the actual size of
2790 * the upl
2791 *
2792 * Notes: IMPORTANT: If the abort is successful, and the object is now
2793 * empty, the upl will be deallocated. Since the caller cannot
2794 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2795 * should generally only be used when the offset is 0 and the size
2796 * is equal to the upl size.
2797 *
2798 * The abort_type is a bitmap of flags on the range of
2799 * pages in the upl to be aborted; allowable flags are:
2800 *
2801 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2802 * empty and has been successfully
2803 * aborted
2804 * o UPL_ABORT_RESTART The operation must be restarted
2805 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2806 * o UPL_ABORT_ERROR An I/O error occurred
2807 * o UPL_ABORT_DUMP_PAGES Just free the pages
2808 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2809 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2810 *
2811 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2812 * not be specified by the caller. It is intended to fulfill the
2813 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2814 * ubc_upl_commit_range(), but is never referenced internally.
2815 *
2816 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2817 * referenced; do not use it.
2818 */
0b4e3aa0
A
2819kern_return_t
2820ubc_upl_abort(
0a7de745
A
2821 upl_t upl,
2822 int abort_type)
0b4e3aa0 2823{
0a7de745 2824 kern_return_t kr;
0b4e3aa0
A
2825
2826 kr = upl_abort(upl, abort_type);
2827 upl_deallocate(upl);
2828 return kr;
2829}
2830
2d21ac55
A
2831
2832/*
2833 * ubc_upl_pageinfo
2834 *
2835 * Retrieve the internal page list for the specified upl
2836 *
2837 * Parameters: upl The upl to obtain the page list from
2838 *
2839 * Returns: !NULL The (upl_page_info_t *) for the page
2840 * list internal to the upl
2841 * NULL Error/no page list associated
2842 *
2843 * Notes: IMPORTANT: The function is only valid on internal objects
2844 * where the list request was made with the UPL_INTERNAL flag.
2845 *
2846 * This function is a utility helper function, since some callers
2847 * may not have direct access to the header defining the macro,
2848 * due to abstraction layering constraints.
2849 */
0b4e3aa0
A
2850upl_page_info_t *
2851ubc_upl_pageinfo(
0a7de745
A
2852 upl_t upl)
2853{
2854 return UPL_GET_INTERNAL_PAGE_LIST(upl);
0b4e3aa0 2855}
91447636 2856
91447636 2857
0a7de745 2858int
fe8ab488 2859UBCINFOEXISTS(const struct vnode * vp)
91447636 2860{
0a7de745 2861 return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
91447636
A
2862}
2863
2d21ac55 2864
316670eb
A
2865void
2866ubc_upl_range_needed(
0a7de745
A
2867 upl_t upl,
2868 int index,
2869 int count)
316670eb
A
2870{
2871 upl_range_needed(upl, index, count);
2872}
2873
0a7de745
A
2874boolean_t
2875ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
fe8ab488 2876{
0a7de745 2877 if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
fe8ab488 2878 return FALSE;
0a7de745
A
2879 }
2880 if (writable) {
fe8ab488 2881 *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
0a7de745 2882 }
fe8ab488
A
2883 return TRUE;
2884}
2885
0a7de745
A
2886boolean_t
2887ubc_is_mapped_writable(const struct vnode *vp)
fe8ab488
A
2888{
2889 boolean_t writable;
2890 return ubc_is_mapped(vp, &writable) && writable;
2891}
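
/*
 * Illustrative sketch (not part of the original source): as noted in
 * ubc_unmap() above, the mapped flags are cleared only after VNOP_MNOMAP
 * has been called, so a filesystem's mnomap handler can consult these
 * predicates, e.g. to push modifications made through a writable mapping.
 */
__unused static void
example_mnomap_check(vnode_t vp)
{
	if (ubc_is_mapped_writable(vp)) {
		(void) ubc_msync(vp, 0, ubc_getsize(vp), NULL, UBC_PUSHDIRTY);
	}
}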
2892
316670eb 2893
2d21ac55
A
2894/*
2895 * CODE SIGNING
2896 */
2d21ac55
A
2897static volatile SInt32 cs_blob_size = 0;
2898static volatile SInt32 cs_blob_count = 0;
2899static SInt32 cs_blob_size_peak = 0;
2900static UInt32 cs_blob_size_max = 0;
2901static SInt32 cs_blob_count_peak = 0;
2d21ac55 2902
6d2010ae
A
2903SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
2904SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
2905SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
2906SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
2907SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");
2d21ac55 2908
3e170ce0
A
2909/*
2910 * Function: csblob_parse_teamid
2911 *
2912 * Description: This function returns a pointer to the team id
0a7de745
A
2913 * stored within the codedirectory of the csblob.
2914 * If the codedirectory predates team-ids, it returns
2915 * NULL.
2916 * This does not copy the name but returns a pointer to
2917 * it within the CD. Consequently, the CD must remain
2918 * available while the returned pointer is in use.
2919 */
3e170ce0
A
2920
2921static const char *
2922csblob_parse_teamid(struct cs_blob *csblob)
2923{
2924 const CS_CodeDirectory *cd;
2925
490019cf 2926 cd = csblob->csb_cd;
3e170ce0 2927
0a7de745 2928 if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
3e170ce0 2929 return NULL;
0a7de745 2930 }
3e170ce0 2931
0a7de745 2932 if (cd->teamOffset == 0) {
3e170ce0 2933 return NULL;
0a7de745 2934 }
3e170ce0
A
2935
2936 const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
0a7de745 2937 if (cs_debug > 1) {
3e170ce0 2938 printf("found team-id %s in cdblob\n", name);
0a7de745 2939 }
3e170ce0
A
2940
2941 return name;
2942}
2943
39236c6e 2944
593a1d5f
A
2945kern_return_t
2946ubc_cs_blob_allocate(
0a7de745
A
2947 vm_offset_t *blob_addr_p,
2948 vm_size_t *blob_size_p)
593a1d5f 2949{
0a7de745 2950 kern_return_t kr = KERN_FAILURE;
593a1d5f 2951
d9a64523
A
2952 {
2953 *blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);
2954
2955 if (*blob_addr_p == 0) {
2956 kr = KERN_NO_SPACE;
2957 } else {
2958 kr = KERN_SUCCESS;
2959 }
593a1d5f 2960 }
d9a64523 2961
593a1d5f
A
2962 return kr;
2963}
2964
2965void
2966ubc_cs_blob_deallocate(
0a7de745
A
2967 vm_offset_t blob_addr,
2968 vm_size_t blob_size)
593a1d5f 2969{
d9a64523
A
2970#if PMAP_CS
2971 if (blob_size > pmap_cs_blob_limit) {
2972 kmem_free(kernel_map, blob_addr, blob_size);
2973 } else
2974#endif
2975 {
0a7de745 2976 kfree(blob_addr, blob_size);
d9a64523 2977 }
39037602
A
2978}
2979
2980/*
2981 * Some codesigned files use a lowest common denominator page size of
2982 * 4KiB, but can be used on systems that have a runtime page size of
2983 * 16KiB. Since faults will only occur on 16KiB ranges in
2984 * cs_validate_range(), we can convert the original Code Directory to
2985 * a multi-level scheme where groups of 4 hashes are combined to form
2986 * a new hash, which represents 16KiB in the on-disk file. This can
2987 * reduce the wired memory requirement for the Code Directory by
2988 * 75%. Care must be taken for binaries that use the "fourk" VM pager
2989 * for unaligned access, which may still attempt to validate on
2990 * non-16KiB multiples for compatibility with 3rd party binaries.
2991 */
2992static boolean_t
2993ubc_cs_supports_multilevel_hash(struct cs_blob *blob)
2994{
2995 const CS_CodeDirectory *cd;
2996
0a7de745 2997
39037602
A
2998 /*
2999 * Only applies to binaries that ship as part of the OS,
3000 * primarily the shared cache.
3001 */
3002 if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
3003 return FALSE;
3004 }
3005
3006 /*
3007 * If the runtime page size matches the code signing page
3008 * size, there is no work to do.
3009 */
3010 if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
3011 return FALSE;
3012 }
3013
3014 cd = blob->csb_cd;
3015
3016 /*
3017 * There must be a valid integral multiple of hashes
3018 */
3019 if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3020 return FALSE;
3021 }
3022
3023 /*
3024 * Scatter lists must also have ranges that have an integral number of hashes
3025 */
3026 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
39037602 3027 const SC_Scatter *scatter = (const SC_Scatter*)
0a7de745 3028 ((const char*)cd + ntohl(cd->scatterOffset));
39037602
A
3029 /* iterate all scatter structs to make sure they are all aligned */
3030 do {
3031 uint32_t sbase = ntohl(scatter->base);
3032 uint32_t scount = ntohl(scatter->count);
3033
3034 /* last scatter? */
3035 if (scount == 0) {
3036 break;
3037 }
3038
3039 if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3040 return FALSE;
3041 }
3042
3043 if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3044 return FALSE;
3045 }
3046
3047 scatter++;
0a7de745 3048 } while (1);
39037602
A
3049 }
3050
3051 /* Covered range must be a multiple of the new page size */
3052 if (ntohl(cd->codeLimit) & PAGE_MASK) {
3053 return FALSE;
3054 }
3055
3056 /* All checks pass */
3057 return TRUE;
3058}
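
/*
 * Illustrative sketch (not part of the original source): for a 4KiB code
 * signing page size on a 16KiB runtime page size, each new hash covers
 * 2^(PAGE_SHIFT - csb_hash_pageshift) == 4 consecutive source hashes.
 * Hashing such a group together is what the per-slot loop in
 * ubc_cs_convert_to_multilevel_hash() below does for every slot; this
 * helper shows one group, assuming the blob has passed the checks above.
 */
__unused static void
example_combine_hash_group(struct cs_blob *blob,
    const unsigned char *src_hashes, unsigned char *dst_hash)
{
	union cs_hash_union mdctx;
	uint32_t source_len = blob->csb_cd->hashSize << 2;	/* 4 source hashes */

	blob->csb_hashtype->cs_init(&mdctx);
	blob->csb_hashtype->cs_update(&mdctx, src_hashes, source_len);
	blob->csb_hashtype->cs_final(dst_hash, &mdctx);
}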
3059
3060/*
d9a64523
A
3061 * Given a cs_blob with an already chosen best code directory, this
3062 * function allocates memory and copies into it only the blobs that
3063 * will be needed by the kernel, namely the single chosen code
3064 * directory (and not any of its alternatives) and the entitlement
3065 * blob.
3066 *
3067 * This saves significant memory with agile signatures, and additional
3068 * memory for 3rd Party Code because we also omit the CMS blob.
3069 *
3070 * To support multilevel and other potential code directory rewriting,
3071 * the size of a new code directory can be specified. Since that code
3072 * directory will replace the existing code directory,
3073 * ubc_cs_reconstitute_code_signature does not copy the original code
3074 * directory when a size is given, and the caller must fill it in.
39037602 3075 */
d9a64523
A
3076static int
3077ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size,
0a7de745
A
3078 vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p,
3079 CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p)
39037602 3080{
0a7de745
A
3081 const CS_CodeDirectory *old_cd, *cd;
3082 CS_CodeDirectory *new_cd;
39037602
A
3083 const CS_GenericBlob *entitlements;
3084 vm_offset_t new_blob_addr;
3085 vm_size_t new_blob_size;
3086 vm_size_t new_cdsize;
0a7de745
A
3087 kern_return_t kr;
3088 int error;
39037602
A
3089
3090 old_cd = blob->csb_cd;
3091
d9a64523 3092 new_cdsize = optional_new_cd_size != 0 ? optional_new_cd_size : ntohl(old_cd->length);
39037602
A
3093
3094 new_blob_size = sizeof(CS_SuperBlob);
3095 new_blob_size += sizeof(CS_BlobIndex);
3096 new_blob_size += new_cdsize;
3097
3098 if (blob->csb_entitlements_blob) {
3099 /* We need to add a slot for the entitlements */
3100 new_blob_size += sizeof(CS_BlobIndex);
3101 new_blob_size += ntohl(blob->csb_entitlements_blob->length);
3102 }
3103
3104 kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
3105 if (kr != KERN_SUCCESS) {
3106 if (cs_debug > 1) {
3107 printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
0a7de745 3108 kr);
39037602 3109 }
d9a64523 3110 return ENOMEM;
39037602
A
3111 }
3112
0a7de745 3113 CS_SuperBlob *new_superblob;
39037602
A
3114
3115 new_superblob = (CS_SuperBlob *)new_blob_addr;
3116 new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
3117 new_superblob->length = htonl((uint32_t)new_blob_size);
3118 if (blob->csb_entitlements_blob) {
0a7de745 3119 vm_size_t ent_offset, cd_offset;
39037602
A
3120
3121 cd_offset = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
3122 ent_offset = cd_offset + new_cdsize;
3123
3124 new_superblob->count = htonl(2);
3125 new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
3126 new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
3127 new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
3128 new_superblob->index[1].offset = htonl((uint32_t)ent_offset);
3129
3130 memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));
3131
3132 new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
3133 } else {
d9a64523
A
3134 // Blob is the code directory, directly.
3135 new_cd = (CS_CodeDirectory *)new_blob_addr;
3136 }
39037602 3137
d9a64523
A
3138 if (optional_new_cd_size == 0) {
3139 // Copy code directory, and revalidate.
3140 memcpy(new_cd, old_cd, new_cdsize);
39037602 3141
d9a64523 3142 vm_size_t length = new_blob_size;
39037602 3143
d9a64523
A
3144 error = cs_validate_csblob((const uint8_t *)new_blob_addr, length, &cd, &entitlements);
3145
3146 if (error) {
3147 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
0a7de745 3148 error);
d9a64523
A
3149
3150 ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3151 return error;
3152 }
3153 *new_entitlements_p = entitlements;
3154 } else {
3155 // Caller will fill out and validate code directory.
3156 memset(new_cd, 0, new_cdsize);
3157 *new_entitlements_p = NULL;
3158 }
3159
3160 *new_blob_addr_p = new_blob_addr;
3161 *new_blob_size_p = new_blob_size;
3162 *new_cd_p = new_cd;
3163
3164 return 0;
3165}
3166
3167static int
3168ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3169{
0a7de745
A
3170 const CS_CodeDirectory *old_cd, *cd;
3171 CS_CodeDirectory *new_cd;
d9a64523
A
3172 const CS_GenericBlob *entitlements;
3173 vm_offset_t new_blob_addr;
3174 vm_size_t new_blob_size;
3175 vm_size_t new_cdsize;
0a7de745 3176 int error;
d9a64523 3177
0a7de745 3178 uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
d9a64523
A
3179
3180 if (cs_debug > 1) {
3181 printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
0a7de745 3182 (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
d9a64523
A
3183 }
3184
3185 old_cd = blob->csb_cd;
3186
3187 /* Up to the hashes, we can copy all data */
3188 new_cdsize = ntohl(old_cd->hashOffset);
3189 new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
3190
3191 error = ubc_cs_reconstitute_code_signature(blob, new_cdsize,
0a7de745
A
3192 &new_blob_addr, &new_blob_size, &new_cd,
3193 &entitlements);
d9a64523
A
3194 if (error != 0) {
3195 printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
3196 return error;
39037602
A
3197 }
3198
3199 memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3200
3201 /* Update fields in the Code Directory structure */
3202 new_cd->length = htonl((uint32_t)new_cdsize);
3203
3204 uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3205 nCodeSlots >>= hashes_per_new_hash_shift;
3206 new_cd->nCodeSlots = htonl(nCodeSlots);
3207
3208 new_cd->pageSize = PAGE_SHIFT; /* Not byte-swapped */
3209
3210 if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3211 SC_Scatter *scatter = (SC_Scatter*)
0a7de745 3212 ((char *)new_cd + ntohl(new_cd->scatterOffset));
39037602
A
3213 /* iterate all scatter structs to scale their counts */
3214 do {
3215 uint32_t scount = ntohl(scatter->count);
3216 uint32_t sbase = ntohl(scatter->base);
3217
3218 /* last scatter? */
3219 if (scount == 0) {
3220 break;
3221 }
3222
3223 scount >>= hashes_per_new_hash_shift;
3224 scatter->count = htonl(scount);
3225
3226 sbase >>= hashes_per_new_hash_shift;
3227 scatter->base = htonl(sbase);
3228
3229 scatter++;
0a7de745 3230 } while (1);
39037602
A
3231 }
3232
3233 /* For each group of hashes, hash them together */
3234 const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3235 unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3236
3237 uint32_t hash_index;
3238 for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
0a7de745 3239 union cs_hash_union mdctx;
39037602
A
3240
3241 uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3242 const unsigned char *src = src_base + hash_index * source_hash_len;
3243 unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3244
3245 blob->csb_hashtype->cs_init(&mdctx);
3246 blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3247 blob->csb_hashtype->cs_final(dst, &mdctx);
3248 }
3249
d9a64523
A
3250 error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
3251 if (error != 0) {
d9a64523 3252 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
0a7de745 3253 error);
39037602
A
3254
3255 ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
d9a64523 3256 return error;
39037602
A
3257 }
3258
0a7de745 3259 /* New Code Directory is ready for use, swap it out in the blob structure */
39037602
A
3260 ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
3261
3262 blob->csb_mem_size = new_blob_size;
3263 blob->csb_mem_kaddr = new_blob_addr;
3264 blob->csb_cd = cd;
3265 blob->csb_entitlements_blob = entitlements;
3266
3267 /* The blob has some cached attributes of the Code Directory, so update those */
3268
3269 blob->csb_hash_firstlevel_pagesize = blob->csb_hash_pagesize; /* Save the original page size */
3270
3271 blob->csb_hash_pagesize = PAGE_SIZE;
3272 blob->csb_hash_pagemask = PAGE_MASK;
3273 blob->csb_hash_pageshift = PAGE_SHIFT;
3274 blob->csb_end_offset = ntohl(cd->codeLimit);
0a7de745 3275 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
39037602 3276 const SC_Scatter *scatter = (const SC_Scatter*)
0a7de745 3277 ((const char*)cd + ntohl(cd->scatterOffset));
39037602
A
3278 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3279 } else {
3280 blob->csb_start_offset = 0;
3281 }
d9a64523
A
3282
3283 return 0;
593a1d5f 3284}
39236c6e 3285
d9a64523
A
3286/*
3287 * Validate the code signature blob, create a struct cs_blob wrapper
3288 * and return it together with a pointer to the chosen code directory
3289 * and entitlements blob.
3290 *
3291 * Note that this takes ownership of the memory at addr, mainly because
3292 * this function can actually replace the passed in blob with another
3293 * one, e.g. when performing multilevel hashing optimization.
3294 */
2d21ac55 3295int
d9a64523
A
3296cs_blob_create_validated(
3297 vm_address_t * const addr,
3298 vm_size_t size,
3299 struct cs_blob ** const ret_blob,
0a7de745 3300 CS_CodeDirectory const ** const ret_cd)
91447636 3301{
0a7de745
A
3302 struct cs_blob *blob;
3303 int error = EINVAL;
2d21ac55 3304 const CS_CodeDirectory *cd;
39037602 3305 const CS_GenericBlob *entitlements;
0a7de745
A
3306 union cs_hash_union mdctx;
3307 size_t length;
15129b1c 3308
0a7de745
A
3309 if (ret_blob) {
3310 *ret_blob = NULL;
3311 }
2d21ac55 3312
0a7de745 3313 blob = (struct cs_blob *) kalloc(sizeof(struct cs_blob));
2d21ac55
A
3314 if (blob == NULL) {
3315 return ENOMEM;
3316 }
3317
2d21ac55 3318 /* fill in the new blob */
2d21ac55
A
3319 blob->csb_mem_size = size;
3320 blob->csb_mem_offset = 0;
39037602 3321 blob->csb_mem_kaddr = *addr;
39236c6e 3322 blob->csb_flags = 0;
5ba3f43e 3323 blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
fe8ab488 3324 blob->csb_platform_binary = 0;
3e170ce0 3325 blob->csb_platform_path = 0;
fe8ab488 3326 blob->csb_teamid = NULL;
39037602
A
3327 blob->csb_entitlements_blob = NULL;
3328 blob->csb_entitlements = NULL;
d9a64523
A
3329 blob->csb_reconstituted = false;
3330
39037602
A
3331 /* Transfer ownership. Even on error, this function will deallocate */
3332 *addr = 0;
3333
2d21ac55
A
3334 /*
3335 * Validate the blob's contents
3336 */
813fb2f6
A
3337 length = (size_t) size;
3338 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
0a7de745 3339 length, &cd, &entitlements);
39236c6e 3340 if (error) {
0a7de745 3341 if (cs_debug) {
39236c6e 3342 printf("CODESIGNING: csblob invalid: %d\n", error);
0a7de745 3343 }
813fb2f6
A
3344 /*
3345 * The vnode checker can't make the rest of this function
3346 * succeed if csblob validation failed, so bail */
3347 goto out;
2d21ac55 3348 } else {
3e170ce0
A
3349 const unsigned char *md_base;
3350 uint8_t hash[CS_HASH_MAX_SIZE];
3351 int md_size;
3352
490019cf 3353 blob->csb_cd = cd;
39037602 3354 blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
3e170ce0 3355 blob->csb_hashtype = cs_find_md(cd->hashType);
0a7de745 3356 if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
3e170ce0 3357 panic("validated CodeDirectory but unsupported type");
0a7de745 3358 }
39037602
A
3359
3360 blob->csb_hash_pageshift = cd->pageSize;
3361 blob->csb_hash_pagesize = (1U << cd->pageSize);
3362 blob->csb_hash_pagemask = blob->csb_hash_pagesize - 1;
3363 blob->csb_hash_firstlevel_pagesize = 0;
39236c6e 3364 blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
39037602 3365 blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask));
0a7de745 3366 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
39236c6e 3367 const SC_Scatter *scatter = (const SC_Scatter*)
0a7de745 3368 ((const char*)cd + ntohl(cd->scatterOffset));
39037602 3369 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize;
b0d623f7 3370 } else {
3e170ce0 3371 blob->csb_start_offset = 0;
b0d623f7 3372 }
3e170ce0
A
3373 /* compute the blob's cdhash */
3374 md_base = (const unsigned char *) cd;
3375 md_size = ntohl(cd->length);
3376
3377 blob->csb_hashtype->cs_init(&mdctx);
3378 blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
3379 blob->csb_hashtype->cs_final(hash, &mdctx);
3380
3381 memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
2d21ac55
A
3382 }
3383
0a7de745 3384 error = 0;
d9a64523
A
3385
3386out:
0a7de745
A
3387 if (error != 0) {
3388 cs_blob_free(blob);
3389 blob = NULL;
3390 cd = NULL;
3391 }
3392
3393 if (ret_blob != NULL) {
3394 *ret_blob = blob;
3395 }
3396 if (ret_cd != NULL) {
3397 *ret_cd = cd;
3398 }
3399
3400 return error;
d9a64523
A
3401}
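
/*
 * A minimal caller sketch for cs_blob_create_validated(), assuming a
 * hypothetical kernel copy of the signature at `copy_addr`/`copy_size`:
 *
 *	struct cs_blob *blob;
 *	CS_CodeDirectory const *cd;
 *	int error = cs_blob_create_validated(&copy_addr, copy_size,
 *	    &blob, &cd);
 *
 * On return, `copy_addr` has been zeroed: the memory now belongs to the
 * blob on success and has already been deallocated on error, so the
 * caller must not free it in either case.
 */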

/*
 * Free a cs_blob previously created by cs_blob_create_validated.
 */
void
cs_blob_free(
	struct cs_blob * const blob)
{
	if (blob != NULL) {
		if (blob->csb_mem_kaddr) {
			ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
			blob->csb_mem_kaddr = 0;
		}
		if (blob->csb_entitlements != NULL) {
			osobject_release(blob->csb_entitlements);
			blob->csb_entitlements = NULL;
		}
		(kfree)(blob, sizeof(*blob));
	}
}

int
ubc_cs_blob_add(
	struct vnode	*vp,
	cpu_type_t	cputype,
	off_t		base_offset,
	vm_address_t	*addr,
	vm_size_t	size,
	struct image_params *imgp,
	__unused int	flags,
	struct cs_blob	**ret_blob)
{
	kern_return_t		kr;
	struct ubc_info		*uip;
	struct cs_blob		*blob, *oblob;
	int			error;
	CS_CodeDirectory const *cd;
	off_t			blob_start_offset, blob_end_offset;
	boolean_t		record_mtime;

	record_mtime = FALSE;
	if (ret_blob) {
		*ret_blob = NULL;
	}

	/* Create the struct cs_blob wrapper that will be attached to the vnode.
	 * Validates the passed-in blob in the process. */
	error = cs_blob_create_validated(addr, size, &blob, &cd);

	if (error != 0) {
		printf("malformed code signature blob: %d\n", error);
		return error;
	}

	blob->csb_cpu_type = cputype;
	blob->csb_base_offset = base_offset;

	/*
	 * Let policy module check whether the blob's signature is accepted.
	 */
#if CONFIG_MACF
	unsigned int cs_flags = blob->csb_flags;
	unsigned int signer_type = blob->csb_signer_type;
	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
	blob->csb_flags = cs_flags;
	blob->csb_signer_type = signer_type;

	if (error) {
		if (cs_debug) {
			printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
		}
		goto out;
	}
	if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
		if (cs_debug) {
			printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
		}
		error = EPERM;
		goto out;
	}
#endif

#if CONFIG_ENFORCE_SIGNED_CODE
	/*
	 * Reconstitute code signature
	 */
	{
		vm_address_t new_mem_kaddr = 0;
		vm_size_t new_mem_size = 0;

		CS_CodeDirectory *new_cd = NULL;
		CS_GenericBlob const *new_entitlements = NULL;

		error = ubc_cs_reconstitute_code_signature(blob, 0,
		    &new_mem_kaddr, &new_mem_size,
		    &new_cd, &new_entitlements);

		if (error != 0) {
			printf("failed code signature reconstitution: %d\n", error);
			goto out;
		}

		ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);

		blob->csb_mem_kaddr = new_mem_kaddr;
		blob->csb_mem_size = new_mem_size;
		blob->csb_cd = new_cd;
		blob->csb_entitlements_blob = new_entitlements;
		blob->csb_reconstituted = true;
	}
#endif

	if (blob->csb_flags & CS_PLATFORM_BINARY) {
		if (cs_debug > 1) {
			printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
		}
		blob->csb_platform_binary = 1;
		blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
	} else {
		blob->csb_platform_binary = 0;
		blob->csb_platform_path = 0;
		blob->csb_teamid = csblob_parse_teamid(blob);
		if (cs_debug > 1) {
			if (blob->csb_teamid) {
				printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
			} else {
				printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
			}
		}
	}

	/*
	 * Validate the blob's coverage
	 */
	blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
	blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;

	if (blob_start_offset >= blob_end_offset ||
	    blob_start_offset < 0 ||
	    blob_end_offset <= 0) {
		/* reject empty or backwards blob */
		error = EINVAL;
		goto out;
	}

	if (ubc_cs_supports_multilevel_hash(blob)) {
		error = ubc_cs_convert_to_multilevel_hash(blob);
		if (error != 0) {
			printf("failed multilevel hash conversion: %d\n", error);
			goto out;
		}
		blob->csb_reconstituted = true;
	}

	vnode_lock(vp);
	if (!UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* check if this new blob overlaps with an existing blob */
	for (oblob = uip->cs_blobs;
	    oblob != NULL;
	    oblob = oblob->csb_next) {
		off_t oblob_start_offset, oblob_end_offset;

		if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
			vnode_unlock(vp);
			error = EALREADY;
			goto out;
		} else if (blob->csb_platform_binary) { // platform binary needs to be the same for app slices
			if (!oblob->csb_platform_binary) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else if (blob->csb_teamid) { // team-id needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid == NULL ||
			    strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else { // non-team-id binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid != NULL) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}

		oblob_start_offset = (oblob->csb_base_offset +
		    oblob->csb_start_offset);
		oblob_end_offset = (oblob->csb_base_offset +
		    oblob->csb_end_offset);
		if (blob_start_offset >= oblob_end_offset ||
		    blob_end_offset <= oblob_start_offset) {
			/* no conflict with this existing blob */
		} else {
			/* conflict ! */
			if (blob_start_offset == oblob_start_offset &&
			    blob_end_offset == oblob_end_offset &&
			    blob->csb_mem_size == oblob->csb_mem_size &&
			    blob->csb_flags == oblob->csb_flags &&
			    (blob->csb_cpu_type == CPU_TYPE_ANY ||
			    oblob->csb_cpu_type == CPU_TYPE_ANY ||
			    blob->csb_cpu_type == oblob->csb_cpu_type) &&
			    !bcmp(blob->csb_cdhash,
			    oblob->csb_cdhash,
			    CS_CDHASH_LEN)) {
				/*
				 * We already have this blob:
				 * we'll return success but
				 * throw away the new blob.
				 */
				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
					/*
					 * The old blob matches this one
					 * but doesn't have any CPU type.
					 * Update it with whatever the caller
					 * provided this time.
					 */
					oblob->csb_cpu_type = cputype;
				}

				/* The signature is still accepted, so update the
				 * generation count. */
				uip->cs_add_gen = cs_blob_generation_count;

				vnode_unlock(vp);
				if (ret_blob) {
					*ret_blob = oblob;
				}
				error = EAGAIN;
				goto out;
			} else {
				/* different blob: reject the new one */
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}
	}


	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}

	if (uip->cs_blobs == NULL) {
		/* loading 1st blob: record the file's current "modify time" */
		record_mtime = TRUE;
	}

	/* set the generation count for cs_blobs */
	uip->cs_add_gen = cs_blob_generation_count;

	/*
	 * Add this blob to the list of blobs for this vnode.
	 * We always add at the front of the list and we never remove a
	 * blob from the list, so ubc_cs_get_blobs() can return whatever
	 * the top of the list was and that list will remain valid
	 * while we validate a page, even after we release the vnode's lock.
	 */
	blob->csb_next = uip->cs_blobs;
	uip->cs_blobs = blob;

	OSAddAtomic(+1, &cs_blob_count);
	if (cs_blob_count > cs_blob_count_peak) {
		cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
	}
	OSAddAtomic((SInt32) + blob->csb_mem_size, &cs_blob_size);
	if ((SInt32) cs_blob_size > cs_blob_size_peak) {
		cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
	}
	if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
		cs_blob_size_max = (UInt32) blob->csb_mem_size;
	}

	if (cs_debug > 1) {
		proc_t p;
		const char *name = vnode_getname_printable(vp);
		p = current_proc();
		printf("CODE SIGNING: proc %d(%s) "
		    "loaded %s signatures for file (%s) "
		    "range 0x%llx:0x%llx flags 0x%x\n",
		    p->p_pid, p->p_comm,
		    blob->csb_cpu_type == -1 ? "detached" : "embedded",
		    name,
		    blob->csb_base_offset + blob->csb_start_offset,
		    blob->csb_base_offset + blob->csb_end_offset,
		    blob->csb_flags);
		vnode_putname_printable(name);
	}

	vnode_unlock(vp);

	if (record_mtime) {
		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
	}

	if (ret_blob) {
		*ret_blob = blob;
	}

	error = 0;	/* success ! */

out:
	if (error) {
		if (cs_debug) {
			printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);
		}

		cs_blob_free(blob);
	}

	if (error == EAGAIN) {
		/*
		 * See above:  error is EAGAIN if we were asked
		 * to add an existing blob again.  We cleaned the new
		 * blob and we want to return success.
		 */
		error = 0;
	}

	return error;
}
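
/*
 * A minimal caller sketch for ubc_cs_blob_add(), in the style of the
 * Mach-O load path (locals are hypothetical):
 *
 *	struct cs_blob *ret_blob = NULL;
 *	error = ubc_cs_blob_add(vp, cputype, macho_offset, &blob_addr,
 *	    blob_size, imgp, 0, &ret_blob);
 *
 * Note that 0 is returned both for a freshly attached blob and for a
 * duplicate of an already-attached one (the EAGAIN case above), so
 * callers only distinguish success from failure.
 */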

void
csvnode_print_debug(struct vnode *vp)
{
	const char	*name = NULL;
	struct ubc_info *uip;
	struct cs_blob *blob;

	name = vnode_getname_printable(vp);
	if (name) {
		printf("csvnode: name: %s\n", name);
		vnode_putname_printable(name);
	}

	vnode_lock_spin(vp);

	if (!UBCINFOEXISTS(vp)) {
		blob = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
		printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
		    (unsigned long)blob->csb_start_offset,
		    (unsigned long)blob->csb_end_offset,
		    blob->csb_flags,
		    blob->csb_platform_binary ? "yes" : "no",
		    blob->csb_platform_path ? "yes" : "no",
		    blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
	}

out:
	vnode_unlock(vp);
}

struct cs_blob *
ubc_cs_blob_get(
	struct vnode	*vp,
	cpu_type_t	cputype,
	off_t		offset)
{
	struct ubc_info	*uip;
	struct cs_blob	*blob;
	off_t offset_in_blob;

	vnode_lock_spin(vp);

	if (!UBCINFOEXISTS(vp)) {
		blob = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	for (blob = uip->cs_blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		if (cputype != -1 && blob->csb_cpu_type == cputype) {
			break;
		}
		if (offset != -1) {
			offset_in_blob = offset - blob->csb_base_offset;
			if (offset_in_blob >= blob->csb_start_offset &&
			    offset_in_blob < blob->csb_end_offset) {
				/* our offset is covered by this blob */
				break;
			}
		}
	}

out:
	vnode_unlock(vp);

	return blob;
}
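
/*
 * A minimal lookup sketch for ubc_cs_blob_get(); passing -1 disables
 * the corresponding match criterion:
 *
 *	// blob covering a given file offset, any slice:
 *	struct cs_blob *b = ubc_cs_blob_get(vp, -1, file_offset);
 *	// blob for a specific CPU type, regardless of offset:
 *	struct cs_blob *b2 = ubc_cs_blob_get(vp, cputype, -1);
 */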

static void
ubc_cs_free(
	struct ubc_info *uip)
{
	struct cs_blob	*blob, *next_blob;

	for (blob = uip->cs_blobs;
	    blob != NULL;
	    blob = next_blob) {
		next_blob = blob->csb_next;
		OSAddAtomic(-1, &cs_blob_count);
		OSAddAtomic((SInt32) - blob->csb_mem_size, &cs_blob_size);
		cs_blob_free(blob);
	}
#if CHECK_CS_VALIDATION_BITMAP
	ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
#endif
	uip->cs_blobs = NULL;
}

/* check cs blob generation on vnode
 * returns:
 *    0         : Success, the cs_blob attached is current
 *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
 */
int
ubc_cs_generation_check(
	struct vnode	*vp)
{
	int retval = ENEEDAUTH;

	vnode_lock_spin(vp);

	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
		retval = 0;
	}

	vnode_unlock(vp);
	return retval;
}
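
/*
 * A minimal sketch of the intended pairing with ubc_cs_blob_revalidate()
 * below, for callers that notice a stale generation count:
 *
 *	if (ubc_cs_generation_check(vp) == ENEEDAUTH) {
 *		error = ubc_cs_blob_revalidate(vp, csblob, imgp, flags);
 *		// 0: blob re-accepted; EAGAIN: reread and re-add the
 *		// signature; anything else: reject.
 *	}
 */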

int
ubc_cs_blob_revalidate(
	struct vnode	*vp,
	struct cs_blob *blob,
	struct image_params *imgp,
	int flags)
{
	int error = 0;
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	size_t size;
	assert(vp != NULL);
	assert(blob != NULL);

	size = blob->csb_mem_size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
	    size, &cd, &entitlements);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		goto out;
	}

	unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
	unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;

	if (blob->csb_reconstituted) {
		/*
		 * Code signatures that have been modified after validation
		 * cannot be revalidated inline from their in-memory blob.
		 *
		 * That's okay, though, because the only path left that relies
		 * on revalidation of existing in-memory blobs is the legacy
		 * detached signature database path, which only exists on macOS,
		 * which does not do reconstitution of any kind.
		 */
		if (cs_debug) {
			printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
		}

		/*
		 * EAGAIN tells the caller that they may reread the code
		 * signature and try attaching it again, which is the same
		 * thing they would do if there was no cs_blob yet in the
		 * first place.
		 *
		 * Conveniently, after ubc_cs_blob_add did a successful
		 * validation, it will detect that a matching cs_blob (cdhash,
		 * offset, arch etc.) already exists, and return success
		 * without re-adding a cs_blob to the vnode.
		 */
		return EAGAIN;
	}

	/* callout to mac_vnode_check_signature */
#if CONFIG_MACF
	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
	if (cs_debug && error) {
		printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
	}
#else
	(void)flags;
	(void)signer_type;
#endif

	/* update generation number if success */
	vnode_lock_spin(vp);
	blob->csb_flags = cs_flags;
	blob->csb_signer_type = signer_type;
	if (UBCINFOEXISTS(vp)) {
		if (error == 0) {
			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
		} else {
			vp->v_ubcinfo->cs_add_gen = 0;
		}
	}

	vnode_unlock(vp);

out:
	return error;
}

void
cs_blob_reset_cache(void)
{
	/* incrementing an odd number by 2 makes sure '0' is never reached. */
	OSAddAtomic(+2, &cs_blob_generation_count);
	printf("Resetting cs_blob cache from all vnodes.\n");
}

struct cs_blob *
ubc_get_cs_blobs(
	struct vnode	*vp)
{
	struct ubc_info	*uip;
	struct cs_blob	*blobs;

	/*
	 * No need to take the vnode lock here.  The caller must be holding
	 * a reference on the vnode (via a VM mapping or open file descriptor),
	 * so the vnode will not go away.  The ubc_info stays until the vnode
	 * goes away.  And we only modify "blobs" by adding to the head of the
	 * list.
	 * The ubc_info could go away entirely if the vnode gets reclaimed as
	 * part of a forced unmount.  In the case of a code-signature validation
	 * during a page fault, the "paging_in_progress" reference on the VM
	 * object guarantees that the vnode pager (and the ubc_info) won't go
	 * away during the fault.
	 * Other callers need to protect against vnode reclaim by holding the
	 * vnode lock, for example.
	 */

	if (!UBCINFOEXISTS(vp)) {
		blobs = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	blobs = uip->cs_blobs;

out:
	return blobs;
}

void
ubc_get_cs_mtime(
	struct vnode	*vp,
	struct timespec	*cs_mtime)
{
	struct ubc_info	*uip;

	if (!UBCINFOEXISTS(vp)) {
		cs_mtime->tv_sec = 0;
		cs_mtime->tv_nsec = 0;
		return;
	}

	uip = vp->v_ubcinfo;
	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
}

unsigned long cs_validate_page_no_hash = 0;
unsigned long cs_validate_page_bad_hash = 0;
static boolean_t
cs_validate_hash(
	struct cs_blob		*blobs,
	memory_object_t		pager,
	memory_object_offset_t	page_offset,
	const void		*data,
	vm_size_t		*bytes_processed,
	unsigned		*tainted)
{
	union cs_hash_union	mdctx;
	struct cs_hash const	*hashtype = NULL;
	unsigned char		actual_hash[CS_HASH_MAX_SIZE];
	unsigned char		expected_hash[CS_HASH_MAX_SIZE];
	boolean_t		found_hash;
	struct cs_blob		*blob;
	const CS_CodeDirectory	*cd;
	const unsigned char	*hash;
	boolean_t		validated;
	off_t			offset;	/* page offset in the file */
	size_t			size;
	off_t			codeLimit = 0;
	const char		*lower_bound, *upper_bound;
	vm_offset_t		kaddr, blob_addr;

	/* retrieve the expected hash */
	found_hash = FALSE;

	for (blob = blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		offset = page_offset - blob->csb_base_offset;
		if (offset < blob->csb_start_offset ||
		    offset >= blob->csb_end_offset) {
			/* our page is not covered by this blob */
			continue;
		}

		/* blob data has been released */
		kaddr = blob->csb_mem_kaddr;
		if (kaddr == 0) {
			continue;
		}

		blob_addr = kaddr + blob->csb_mem_offset;
		lower_bound = CAST_DOWN(char *, blob_addr);
		upper_bound = lower_bound + blob->csb_mem_size;

		cd = blob->csb_cd;
		if (cd != NULL) {
			/* all CDs that have been injected are already validated */

			hashtype = blob->csb_hashtype;
			if (hashtype == NULL) {
				panic("unknown hash type ?");
			}
			if (hashtype->cs_digest_size > sizeof(actual_hash)) {
				panic("hash size too large");
			}
			if (offset & blob->csb_hash_pagemask) {
				panic("offset not aligned to cshash boundary");
			}

			codeLimit = ntohl(cd->codeLimit);

			hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
			    hashtype->cs_size,
			    lower_bound, upper_bound);
			if (hash != NULL) {
				bcopy(hash, expected_hash, hashtype->cs_size);
				found_hash = TRUE;
			}

			break;
		}
	}

	if (found_hash == FALSE) {
		/*
		 * We can't verify this page because there is no signature
		 * for it (yet).  It's possible that this part of the object
		 * is not signed, or that signatures for that part have not
		 * been loaded yet.
		 * Report that the page has not been validated and let the
		 * caller decide if it wants to accept it or not.
		 */
		cs_validate_page_no_hash++;
		if (cs_debug > 1) {
			printf("CODE SIGNING: cs_validate_page: "
			    "mobj %p off 0x%llx: no hash to validate !?\n",
			    pager, page_offset);
		}
		validated = FALSE;
		*tainted = 0;
	} else {
		*tainted = 0;

		size = blob->csb_hash_pagesize;
		*bytes_processed = size;

		const uint32_t *asha1, *esha1;
		if ((off_t)(offset + size) > codeLimit) {
			/* partial page at end of segment */
			assert(offset < codeLimit);
			size = (size_t) (codeLimit & blob->csb_hash_pagemask);
			*tainted |= CS_VALIDATE_NX;
		}

		hashtype->cs_init(&mdctx);

		if (blob->csb_hash_firstlevel_pagesize) {
			const unsigned char *partial_data = (const unsigned char *)data;
			size_t i;
			for (i = 0; i < size;) {
				union cs_hash_union	partialctx;
				unsigned char partial_digest[CS_HASH_MAX_SIZE];
				size_t partial_size = MIN(size - i, blob->csb_hash_firstlevel_pagesize);

				hashtype->cs_init(&partialctx);
				hashtype->cs_update(&partialctx, partial_data, partial_size);
				hashtype->cs_final(partial_digest, &partialctx);

				/* Update cumulative multi-level hash */
				hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
				partial_data = partial_data + partial_size;
				i += partial_size;
			}
		} else {
			hashtype->cs_update(&mdctx, data, size);
		}
		hashtype->cs_final(actual_hash, &mdctx);

		asha1 = (const uint32_t *) actual_hash;
		esha1 = (const uint32_t *) expected_hash;

		if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
			if (cs_debug) {
				printf("CODE SIGNING: cs_validate_page: "
				    "mobj %p off 0x%llx size 0x%lx: "
				    "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
				    "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
				    pager, page_offset, size,
				    asha1[0], asha1[1], asha1[2],
				    asha1[3], asha1[4],
				    esha1[0], esha1[1], esha1[2],
				    esha1[3], esha1[4]);
			}
			cs_validate_page_bad_hash++;
			*tainted |= CS_VALIDATE_TAINTED;
		} else {
			if (cs_debug > 10) {
				printf("CODE SIGNING: cs_validate_page: "
				    "mobj %p off 0x%llx size 0x%lx: "
				    "SHA1 OK\n",
				    pager, page_offset, size);
			}
		}
		validated = TRUE;
	}

	return validated;
}
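
/*
 * A worked example of the two-level hashing above, assuming
 * csb_hash_firstlevel_pagesize == 4K and csb_hash_pagesize == 16K:
 *
 *	expected = H( H(data[0..4K)) || H(data[4K..8K)) ||
 *	              H(data[8K..12K)) || H(data[12K..16K)) )
 *
 * i.e. the stored page hash is a digest over the concatenated digests
 * of the first-level sub-pages, not over the raw page data itself.
 */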

boolean_t
cs_validate_range(
	struct vnode	*vp,
	memory_object_t		pager,
	memory_object_offset_t	page_offset,
	const void		*data,
	vm_size_t		dsize,
	unsigned		*tainted)
{
	vm_size_t offset_in_range;
	boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */

	struct cs_blob *blobs = ubc_get_cs_blobs(vp);

	*tainted = 0;

	for (offset_in_range = 0;
	    offset_in_range < dsize;
	    /* offset_in_range updated based on bytes processed */) {
		unsigned subrange_tainted = 0;
		boolean_t subrange_validated;
		vm_size_t bytes_processed = 0;

		subrange_validated = cs_validate_hash(blobs,
		    pager,
		    page_offset + offset_in_range,
		    (const void *)((const char *)data + offset_in_range),
		    &bytes_processed,
		    &subrange_tainted);

		*tainted |= subrange_tainted;

		if (bytes_processed == 0) {
			/* Cannot make forward progress, so return an error */
			all_subranges_validated = FALSE;
			break;
		} else if (subrange_validated == FALSE) {
			all_subranges_validated = FALSE;
			/* Keep going to detect other types of failures in subranges */
		}

		offset_in_range += bytes_processed;
	}

	return all_subranges_validated;
}
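
/*
 * A minimal caller sketch for cs_validate_range(), as a pager might use
 * it on freshly paged-in data (locals are hypothetical):
 *
 *	unsigned tainted = 0;
 *	boolean_t ok = cs_validate_range(vp, pager, file_offset,
 *	    kernel_buf, buf_size, &tainted);
 *	if (!ok || (tainted & CS_VALIDATE_TAINTED)) {
 *		// at least one subrange failed its hash check
 *	}
 *	if (tainted & CS_VALIDATE_NX) {
 *		// the range crosses codeLimit and must not be executable
 *	}
 */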

int
ubc_cs_getcdhash(
	vnode_t		vp,
	off_t		offset,
	unsigned char	*cdhash)
{
	struct cs_blob	*blobs, *blob;
	off_t		rel_offset;
	int		ret;

	vnode_lock(vp);

	blobs = ubc_get_cs_blobs(vp);
	for (blob = blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		/* compute offset relative to this blob */
		rel_offset = offset - blob->csb_base_offset;
		if (rel_offset >= blob->csb_start_offset &&
		    rel_offset < blob->csb_end_offset) {
			/* this blob does cover our "offset" ! */
			break;
		}
	}

	if (blob == NULL) {
		/* we didn't find a blob covering "offset" */
		ret = EBADEXEC; /* XXX any better error ? */
	} else {
		/* get the cdhash of that blob */
		bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
		ret = 0;
	}

	vnode_unlock(vp);

	return ret;
}
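
/*
 * A minimal sketch for ubc_cs_getcdhash(); the output buffer must hold
 * CS_CDHASH_LEN bytes:
 *
 *	unsigned char cdhash[CS_CDHASH_LEN];
 *	if (ubc_cs_getcdhash(vp, file_offset, cdhash) == 0) {
 *		// cdhash now identifies the blob covering file_offset
 *	}
 */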

boolean_t
ubc_cs_is_range_codesigned(
	vnode_t			vp,
	mach_vm_offset_t	start,
	mach_vm_size_t		size)
{
	struct cs_blob		*csblob;
	mach_vm_offset_t	blob_start;
	mach_vm_offset_t	blob_end;

	if (vp == NULL) {
		/* no file: no code signature */
		return FALSE;
	}
	if (size == 0) {
		/* no range: no code signature */
		return FALSE;
	}
	if (start + size < start) {
		/* overflow */
		return FALSE;
	}

	csblob = ubc_cs_blob_get(vp, -1, start);
	if (csblob == NULL) {
		return FALSE;
	}

	/*
	 * We currently check if the range is covered by a single blob,
	 * which should always be the case for the dyld shared cache.
	 * If we ever want to make this routine handle other cases, we
	 * would have to iterate if the blob does not cover the full range.
	 */
	blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
	    csblob->csb_start_offset);
	blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
	    csblob->csb_end_offset);
	if (blob_start > start || blob_end < (start + size)) {
		/* range not fully covered by this code-signing blob */
		return FALSE;
	}

	return TRUE;
}

#if CHECK_CS_VALIDATION_BITMAP
#define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
extern boolean_t	root_fs_upgrade_try;

/*
 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
 * Depends:
 * a) Is the target vnode on the root filesystem?
 * b) Has someone tried to mount the root filesystem read-write?
 * If answers are (a) yes AND (b) no, then we can use the bitmap.
 */
#define USE_CODE_SIGN_BITMAP(vp)	( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
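
/*
 * A worked example of the stob() ("size to bitmap") math above: the
 * file size is rounded up to whole pages, with one bit per page,
 * rounded up to whole bytes. With 4K pages, a 1 MiB file spans 256
 * pages, so stob(1 MiB) == (256 + 7) >> 3 == 32 bytes of bitmap.
 */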
kern_return_t
ubc_cs_validation_bitmap_allocate(
	vnode_t		vp)
{
	kern_return_t	kr = KERN_SUCCESS;
	struct ubc_info *uip;
	char		*target_bitmap;
	vm_object_size_t	bitmap_size;

	if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		uip = vp->v_ubcinfo;

		if (uip->cs_valid_bitmap == NULL) {
			bitmap_size = stob(uip->ui_size);
			target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size );
			if (target_bitmap == 0) {
				kr = KERN_NO_SPACE;
			} else {
				kr = KERN_SUCCESS;
			}
			if (kr == KERN_SUCCESS) {
				memset( target_bitmap, 0, (size_t)bitmap_size);
				uip->cs_valid_bitmap = (void*)target_bitmap;
				uip->cs_valid_bitmap_size = bitmap_size;
			}
		}
	}
	return kr;
}

kern_return_t
ubc_cs_check_validation_bitmap(
	vnode_t			vp,
	memory_object_offset_t	offset,
	int			optype)
{
	kern_return_t	kr = KERN_SUCCESS;

	if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		struct ubc_info *uip = vp->v_ubcinfo;
		char		*target_bitmap = uip->cs_valid_bitmap;

		if (target_bitmap == NULL) {
			kr = KERN_INVALID_ARGUMENT;
		} else {
			uint64_t	bit, byte;
			bit = atop_64( offset );
			byte = bit >> 3;

			if (byte > uip->cs_valid_bitmap_size) {
				kr = KERN_INVALID_ARGUMENT;
			} else {
				if (optype == CS_BITMAP_SET) {
					target_bitmap[byte] |= (1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CLEAR) {
					target_bitmap[byte] &= ~(1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CHECK) {
					if (target_bitmap[byte] & (1 << (bit & 07))) {
						kr = KERN_SUCCESS;
					} else {
						kr = KERN_FAILURE;
					}
				}
			}
		}
	}
	return kr;
}

void
ubc_cs_validation_bitmap_deallocate(
	vnode_t		vp)
{
	struct ubc_info *uip;
	void		*target_bitmap;
	vm_object_size_t	bitmap_size;

	if (UBCINFOEXISTS(vp)) {
		uip = vp->v_ubcinfo;

		if ((target_bitmap = uip->cs_valid_bitmap) != NULL) {
			bitmap_size = uip->cs_valid_bitmap_size;
			kfree( target_bitmap, (vm_size_t) bitmap_size );
			uip->cs_valid_bitmap = NULL;
		}
	}
}
#else
kern_return_t
ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
{
	return KERN_INVALID_ARGUMENT;
}

kern_return_t
ubc_cs_check_validation_bitmap(
	__unused struct vnode *vp,
	__unused memory_object_offset_t offset,
	__unused int optype)
{
	return KERN_INVALID_ARGUMENT;
}

void
ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp)
{
	return;
}
#endif /* CHECK_CS_VALIDATION_BITMAP */

#if PMAP_CS
kern_return_t
cs_associate_blob_with_mapping(
	void			*pmap,
	vm_map_offset_t		start,
	vm_map_size_t		size,
	vm_object_offset_t	offset,
	void			*blobs_p)
{
	off_t			blob_start_offset, blob_end_offset;
	kern_return_t		kr;
	struct cs_blob		*blobs, *blob;
	vm_offset_t		kaddr;
	struct pmap_cs_code_directory *cd_entry = NULL;

	if (!pmap_cs) {
		return KERN_NOT_SUPPORTED;
	}

	blobs = (struct cs_blob *)blobs_p;

	for (blob = blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		blob_start_offset = (blob->csb_base_offset +
		    blob->csb_start_offset);
		blob_end_offset = (blob->csb_base_offset +
		    blob->csb_end_offset);
		if ((off_t) offset < blob_start_offset ||
		    (off_t) offset >= blob_end_offset ||
		    (off_t) (offset + size) <= blob_start_offset ||
		    (off_t) (offset + size) > blob_end_offset) {
			continue;
		}
		kaddr = blob->csb_mem_kaddr;
		if (kaddr == 0) {
			/* blob data has been released */
			continue;
		}
		cd_entry = blob->csb_pmap_cs_entry;
		if (cd_entry == NULL) {
			continue;
		}

		break;
	}

	if (cd_entry != NULL) {
		kr = pmap_cs_associate(pmap,
		    cd_entry,
		    start,
		    size);
	} else {
		kr = KERN_CODESIGN_ERROR;
	}
#if 00
	printf("FBDP %d[%s] pmap_cs_associate(%p,%p,0x%llx,0x%llx) -> kr=0x%x\n", proc_selfpid(), &(current_proc()->p_comm[0]), pmap, cd_entry, (uint64_t)start, (uint64_t)size, kr);
	kr = KERN_SUCCESS;
#endif
	return kr;
}
#endif /* PMAP_CS */