/*
 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	ubc_subr.c
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 *	Caller of UBC functions MUST have a valid reference on the vnode.
 *
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/codesign.h>
#include <sys/codedir_internal.h>
#include <sys/fsevents.h>
#include <sys/fcntl.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/upl.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */

#include <libkern/crypto/sha1.h>
#include <libkern/crypto/sha2.h>
#include <libkern/libkern.h>

#include <security/mac_framework.h>
#include <stdbool.h>

/* XXX These should be in a BSD accessible Mach header, but aren't. */
extern kern_return_t memory_object_pages_resident(memory_object_control_t,
						  boolean_t *);
extern kern_return_t memory_object_signed(memory_object_control_t control,
					  boolean_t is_signed);
extern boolean_t memory_object_is_signed(memory_object_control_t);

/* XXX Same for those. */

extern void Debugger(const char *message);


/* XXX no one uses this interface! */
kern_return_t ubc_page_op_with_control(
	memory_object_control_t	 control,
	off_t			 f_offset,
	int			 ops,
	ppnum_t			 *phys_entryp,
	int			 *flagsp);


#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
static void ubc_cs_free(struct ubc_info *uip);

static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);

struct zone	*ubc_info_zone;
static uint32_t	cs_blob_generation_count = 1;

/*
 * CODESIGNING
 * Routines to navigate code signing data structures in the kernel...
 */

extern int cs_debug;

#define	PAGE_SHIFT_4K		(12)

static boolean_t
cs_valid_range(
	const void *start,
	const void *end,
	const void *lower_bound,
	const void *upper_bound)
{
	if (upper_bound < lower_bound ||
	    end < start) {
		return FALSE;
	}

	if (start < lower_bound ||
	    end > upper_bound) {
		return FALSE;
	}

	return TRUE;
}

typedef void (*cs_md_init)(void *ctx);
typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
typedef void (*cs_md_final)(void *hash, void *ctx);

struct cs_hash {
	uint8_t		cs_type;	/* type code as per code signing */
	size_t		cs_size;	/* size of effective hash (may be truncated) */
	size_t		cs_digest_size;	/* size of native hash */
	cs_md_init	cs_init;
	cs_md_update	cs_update;
	cs_md_final	cs_final;
};

uint8_t cs_hash_type(
	struct cs_hash const * const cs_hash)
{
	return cs_hash->cs_type;
}

static const struct cs_hash cs_hash_sha1 = {
	.cs_type = CS_HASHTYPE_SHA1,
	.cs_size = CS_SHA1_LEN,
	.cs_digest_size = SHA_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA1Init,
	.cs_update = (cs_md_update)SHA1Update,
	.cs_final = (cs_md_final)SHA1Final,
};
#if CRYPTO_SHA2
static const struct cs_hash cs_hash_sha256 = {
	.cs_type = CS_HASHTYPE_SHA256,
	.cs_size = SHA256_DIGEST_LENGTH,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};
static const struct cs_hash cs_hash_sha256_truncate = {
	.cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
	.cs_size = CS_SHA256_TRUNCATED_LEN,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};
static const struct cs_hash cs_hash_sha384 = {
	.cs_type = CS_HASHTYPE_SHA384,
	.cs_size = SHA384_DIGEST_LENGTH,
	.cs_digest_size = SHA384_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA384_Init,
	.cs_update = (cs_md_update)SHA384_Update,
	.cs_final = (cs_md_final)SHA384_Final,
};
#endif

static struct cs_hash const *
cs_find_md(uint8_t type)
{
	if (type == CS_HASHTYPE_SHA1) {
		return &cs_hash_sha1;
#if CRYPTO_SHA2
	} else if (type == CS_HASHTYPE_SHA256) {
		return &cs_hash_sha256;
	} else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
		return &cs_hash_sha256_truncate;
	} else if (type == CS_HASHTYPE_SHA384) {
		return &cs_hash_sha384;
#endif
	}
	return NULL;
}

union cs_hash_union {
	SHA1_CTX	sha1ctxt;
	SHA256_CTX	sha256ctx;
	SHA384_CTX	sha384ctx;
};

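/*
 * Illustrative sketch (not part of the original source): digesting one
 * code page through the indirection above; "data" and "size" are
 * hypothetical.
 */
#if 0 /* example only */
	{
		union cs_hash_union ctx;
		unsigned char digest[CS_HASH_MAX_SIZE];
		struct cs_hash const *hashtype = cs_find_md(CS_HASHTYPE_SHA256);

		if (hashtype != NULL && hashtype->cs_digest_size <= sizeof(digest)) {
			hashtype->cs_init(&ctx);
			hashtype->cs_update(&ctx, data, size);
			hashtype->cs_final(digest, &ctx);
			/* only the first hashtype->cs_size bytes are compared */
		}
	}
#endif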

/*
 * Choose among different hash algorithms.
 * Higher is better, 0 => don't use at all.
 */
static const uint32_t hashPriorities[] = {
	CS_HASHTYPE_SHA1,
	CS_HASHTYPE_SHA256_TRUNCATED,
	CS_HASHTYPE_SHA256,
	CS_HASHTYPE_SHA384,
};

static unsigned int
hash_rank(const CS_CodeDirectory *cd)
{
	uint32_t type = cd->hashType;
	unsigned int n;

	for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n)
		if (hashPriorities[n] == type)
			return n + 1;
	return 0;	/* not supported */
}


/*
 * Locating a page hash
 */
static const unsigned char *
hashes(
	const CS_CodeDirectory *cd,
	uint32_t page,
	size_t hash_len,
	const char *lower_bound,
	const char *upper_bound)
{
	const unsigned char *base, *top, *hash;
	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);

	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));

	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		/* Get first scatter struct */
		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		uint32_t hashindex = 0, scount, sbase = 0;
		/* iterate all scatter structs */
		do {
			if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
				if (cs_debug) {
					printf("CODE SIGNING: Scatter extends past Code Directory\n");
				}
				return NULL;
			}

			scount = ntohl(scatter->count);
			uint32_t new_base = ntohl(scatter->base);

			/* last scatter? */
			if (scount == 0) {
				return NULL;
			}

			if ((hashindex > 0) && (new_base <= sbase)) {
				if (cs_debug) {
					printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
					       sbase, new_base);
				}
				return NULL;	/* unordered scatter array */
			}
			sbase = new_base;

			/* this scatter beyond page we're looking for? */
			if (sbase > page) {
				return NULL;
			}

			if (sbase + scount >= page) {
				/* Found the scatter struct that is
				 * referencing our page */

				/* base = address of first hash covered by scatter */
				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
					hashindex * hash_len;
				/* top = address of first hash after this scatter */
				top = base + scount * hash_len;
				if (!cs_valid_range(base, top, lower_bound,
						    upper_bound) ||
				    hashindex > nCodeSlots) {
					return NULL;
				}

				break;
			}

			/* this scatter struct is before the page we're looking
			 * for. Iterate. */
			hashindex += scount;
			scatter++;
		} while (1);

		hash = base + (page - sbase) * hash_len;
	} else {
		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
		top = base + nCodeSlots * hash_len;
		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
		    page > nCodeSlots) {
			return NULL;
		}
		assert(page < nCodeSlots);

		hash = base + page * hash_len;
	}

	if (!cs_valid_range(hash, hash + hash_len,
			    lower_bound, upper_bound)) {
		hash = NULL;
	}

	return hash;
}
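
/*
 * Illustrative sketch (not part of the original source): looking up the
 * stored hash for page 0 of a validated code directory. "cd" lies inside
 * the signature buffer [lower, upper) and "hashtype" came from
 * cs_find_md(); all four names are hypothetical.
 */
#if 0 /* example only */
	{
		const unsigned char *slot;

		slot = hashes(cd, 0, hashtype->cs_size, lower, upper);
		if (slot == NULL) {
			/* page 0 has no stored hash inside the bounds */
		}
	}
#endif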

/*
 * cs_validate_codedirectory
 *
 * Validate the pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer.
 *
 * Parameters:	cd			Pointer to code directory buffer
 *		length			Length of buffer
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */

static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{
	struct cs_hash const *hashtype;

	if (length < sizeof(*cd))
		return EBADEXEC;
	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY)
		return EBADEXEC;
	if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT)
		return EBADEXEC;
	hashtype = cs_find_md(cd->hashType);
	if (hashtype == NULL)
		return EBADEXEC;

	if (cd->hashSize != hashtype->cs_size)
		return EBADEXEC;

	if (length < ntohl(cd->hashOffset))
		return EBADEXEC;

	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
	if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots))
		return EBADEXEC;

	/* check that the code slots fit in the buffer */
	if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots))
		return EBADEXEC;

	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {

		if (length < ntohl(cd->scatterOffset))
			return EBADEXEC;

		const SC_Scatter *scatter = (const SC_Scatter *)
			(((const uint8_t *)cd) + ntohl(cd->scatterOffset));
		uint32_t nPages = 0;

		/*
		 * Check each scatter buffer; since we don't know the
		 * length of the scatter buffer array, we have to
		 * check each entry.
		 */
		while (1) {
			/* check that the end of each scatter buffer is within the length */
			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length)
				return EBADEXEC;
			uint32_t scount = ntohl(scatter->count);
			if (scount == 0)
				break;
			if (nPages + scount < nPages)
				return EBADEXEC;
			nPages += scount;
			scatter++;

			/* XXX check that bases don't overlap */
			/* XXX check that targetOffset doesn't overlap */
		}
#if 0 /* rdar://12579439 */
		if (nPages != ntohl(cd->nCodeSlots))
			return EBADEXEC;
#endif
	}

	if (length < ntohl(cd->identOffset))
		return EBADEXEC;

	/* identifier is a NUL terminated string */
	if (cd->identOffset) {
		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL)
			return EBADEXEC;
	}

	/* team identifier is a NUL terminated string */
	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
		if (length < ntohl(cd->teamOffset))
			return EBADEXEC;

		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL)
			return EBADEXEC;
	}

	return 0;
}

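/*
 * Illustrative sketch (not part of the original source): rejecting a raw
 * code directory before trusting any offset inside it; "addr" and
 * "blob_len" are hypothetical.
 */
#if 0 /* example only */
	{
		const CS_CodeDirectory *cd = (const CS_CodeDirectory *)(const void *)addr;

		if (cs_validate_codedirectory(cd, blob_len) != 0)
			return EBADEXEC;	/* unparseable signature */
	}
#endif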
/*
 *
 */

static int
cs_validate_blob(const CS_GenericBlob *blob, size_t length)
{
	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length))
		return EBADEXEC;
	return 0;
}

/*
 * cs_validate_csblob
 *
 * Validate the superblob/embedded code directory to make sure that
 * all internal pointers are valid.
 *
 * Will validate both a superblob csblob and a "raw" code directory.
 *
 *
 * Parameters:	addr			Pointer to code signature
 *		blob_size		Length of buffer
 *		rcd			returns pointer to code directory
 *		rentitlements		returns pointer to the entitlements blob
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */

static int
cs_validate_csblob(
	const uint8_t *addr,
	const size_t blob_size,
	const CS_CodeDirectory **rcd,
	const CS_GenericBlob **rentitlements)
{
	const CS_GenericBlob *blob;
	int error;
	size_t length;

	*rcd = NULL;
	*rentitlements = NULL;

	blob = (const CS_GenericBlob *)(const void *)addr;

	length = blob_size;
	error = cs_validate_blob(blob, length);
	if (error)
		return error;
	length = ntohl(blob->length);

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb;
		uint32_t n, count;
		const CS_CodeDirectory *best_cd = NULL;
		unsigned int best_rank = 0;
#if PLATFORM_WatchOS
		const CS_CodeDirectory *sha1_cd = NULL;
#endif

		if (length < sizeof(CS_SuperBlob))
			return EBADEXEC;

		sb = (const CS_SuperBlob *)blob;
		count = ntohl(sb->count);

		/* check that the array of BlobIndex fits in the rest of the data */
		if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count)
			return EBADEXEC;

		/* now check each BlobIndex */
		for (n = 0; n < count; n++) {
			const CS_BlobIndex *blobIndex = &sb->index[n];
			uint32_t type = ntohl(blobIndex->type);
			uint32_t offset = ntohl(blobIndex->offset);
			if (length < offset)
				return EBADEXEC;

			const CS_GenericBlob *subBlob =
				(const CS_GenericBlob *)(const void *)(addr + offset);

			size_t subLength = length - offset;

			if ((error = cs_validate_blob(subBlob, subLength)) != 0)
				return error;
			subLength = ntohl(subBlob->length);

			/* extra validation for CDs, that is also returned */
			if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
				const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
				if ((error = cs_validate_codedirectory(candidate, subLength)) != 0)
					return error;
				unsigned int rank = hash_rank(candidate);
				if (cs_debug > 3)
					printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
				if (best_cd == NULL || rank > best_rank) {
					best_cd = candidate;
					best_rank = rank;

					if (cs_debug > 2)
						printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
					*rcd = best_cd;
				} else if (best_cd != NULL && rank == best_rank) {
					/* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
					printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
					return EBADEXEC;
				}
#if PLATFORM_WatchOS
				if (candidate->hashType == CS_HASHTYPE_SHA1) {
					if (sha1_cd != NULL) {
						printf("multiple sha1 CodeDirectories in signature; rejecting\n");
						return EBADEXEC;
					}
					sha1_cd = candidate;
				}
#endif
			} else if (type == CSSLOT_ENTITLEMENTS) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
					return EBADEXEC;
				}
				if (*rentitlements != NULL) {
					printf("multiple entitlements blobs\n");
					return EBADEXEC;
				}
				*rentitlements = subBlob;
			}
		}

#if PLATFORM_WatchOS
		/* To keep watchOS fast enough, we have to resort to sha1 for
		 * some code.
		 *
		 * At the time of writing this comment, known sha1 attacks are
		 * collision attacks (not preimage or second preimage
		 * attacks), which do not apply to platform binaries since
		 * they have a fixed hash in the trust cache. Given this
		 * property, we only prefer sha1 code directories for adhoc
		 * signatures, which always have to be in a trust cache to be
		 * valid (can-load-cdhash does not exist for watchOS). Those
		 * are, incidentally, also the platform binaries, for which we
		 * care about the performance hit that sha256 would bring us.
		 *
		 * Platform binaries may still contain a (not chosen) sha256
		 * code directory, which keeps software updates that switch to
		 * sha256-only small.
		 */

		if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
			if (sha1_cd->flags != (*rcd)->flags) {
				printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
				       (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
				*rcd = NULL;
				return EBADEXEC;
			}

			*rcd = sha1_cd;
		}
#endif

	} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {

		if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0)
			return error;
		*rcd = (const CS_CodeDirectory *)blob;
	} else {
		return EBADEXEC;
	}

	if (*rcd == NULL)
		return EBADEXEC;

	return 0;
}

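/*
 * Illustrative sketch (not part of the original source): validating an
 * in-memory signature before using it; "sig" and "sig_len" are
 * hypothetical.
 */
#if 0 /* example only */
	{
		const CS_CodeDirectory *cd = NULL;
		const CS_GenericBlob *entitlements = NULL;

		if (cs_validate_csblob(sig, sig_len, &cd, &entitlements) != 0)
			return EBADEXEC;
		/* cd now points at the highest-ranked code directory */
	}
#endif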
/*
 * cs_find_blob_bytes
 *
 * Find a blob in the superblob/code directory. The blob must have
 * been validated by cs_validate_csblob() before calling
 * this. Use csblob_find_blob() instead.
 *
 * Will also find a "raw" code directory if it's stored, as well as
 * searching the superblob.
 *
 * Parameters:	addr			Pointer to code signature
 *		length			Length of buffer
 *		type			type of blob to find
 *		magic			the magic number for that blob
 *
 * Returns:	pointer			Success
 *		NULL			Blob not found
 */

const CS_GenericBlob *
csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
{
	const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
		size_t n, count = ntohl(sb->count);

		for (n = 0; n < count; n++) {
			if (ntohl(sb->index[n].type) != type)
				continue;
			uint32_t offset = ntohl(sb->index[n].offset);
			if (length - sizeof(const CS_GenericBlob) < offset)
				return NULL;
			blob = (const CS_GenericBlob *)(const void *)(addr + offset);
			if (ntohl(blob->magic) != magic)
				continue;
			return blob;
		}
	} else if (type == CSSLOT_CODEDIRECTORY
		   && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
		   && magic == CSMAGIC_CODEDIRECTORY)
		return blob;
	return NULL;
}


const CS_GenericBlob *
csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
{
	if ((csblob->csb_flags & CS_VALID) == 0)
		return NULL;
	return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
}

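/*
 * Illustrative sketch (not part of the original source): pulling the
 * embedded entitlements blob out of a validated cs_blob; "csblob" is
 * hypothetical.
 */
#if 0 /* example only */
	{
		const CS_GenericBlob *blob;

		blob = csblob_find_blob(csblob, CSSLOT_ENTITLEMENTS,
					CSMAGIC_EMBEDDED_ENTITLEMENTS);
		if (blob != NULL) {
			/* blob spans ntohl(blob->length) bytes, header included */
		}
	}
#endif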
static const uint8_t *
find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
{
	/* there is no zero special slot since that is the first code slot */
	if (ntohl(cd->nSpecialSlots) < slot || slot == 0)
		return NULL;

	return ((const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot));
}

static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };

int
csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *entitlements;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	*out_start = NULL;
	*out_length = 0;

	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash))
		return EBADEXEC;

	code_dir = csblob->csb_cd;

	if ((csblob->csb_flags & CS_VALID) == 0) {
		entitlements = NULL;
	} else {
		entitlements = csblob->csb_entitlements_blob;
	}
	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		if (entitlements)
			return EBADEXEC;
		return 0;
	} else if (entitlements == NULL) {
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0)
		return EBADEXEC;

	*out_start = __DECONST(void *, entitlements);
	*out_length = ntohl(entitlements->length);

	return 0;
}

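/*
 * Illustrative sketch (not part of the original source): fetching and
 * verifying the entitlements of a validated cs_blob; "csblob" is
 * hypothetical.
 */
#if 0 /* example only */
	{
		void *ents = NULL;
		size_t ents_len = 0;

		if (csblob_get_entitlements(csblob, &ents, &ents_len) == 0 &&
		    ents != NULL) {
			/* ents holds ents_len bytes whose digest matched the
			 * code directory's CSSLOT_ENTITLEMENTS special slot */
		}
	}
#endif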
/*
 * CODESIGNING
 * End of routines to navigate code signing data structures in the kernel.
 */



/*
 * ubc_init
 *
 * Initialization of the zone for Unified Buffer Cache.
 *
 * Parameters:	(void)
 *
 * Returns:	(void)
 *
 * Implicit returns:
 *		ubc_info_zone(global)	initialized for subsequent allocations
 */
__private_extern__ void
ubc_init(void)
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);

	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");

	zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
}


/*
 * ubc_info_init
 *
 * Allocate and attach an empty ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *
 * Returns:	0			Success
 *	vnode_size:ENOMEM		Not enough space
 *	vnode_size:???			Other error from vnode_getattr
 *
 */
int
ubc_info_init(struct vnode *vp)
{
	return(ubc_info_init_internal(vp, 0, 0));
}


/*
 * ubc_info_init_withsize
 *
 * Allocate and attach a sized ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		filesize		The size of the file
 *
 * Returns:	0			Success
 *	vnode_size:ENOMEM		Not enough space
 *	vnode_size:???			Other error from vnode_getattr
 */
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return(ubc_info_init_internal(vp, 1, filesize));
}

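/*
 * Illustrative sketch (not part of the original source): a filesystem
 * that already knows the file's length can attach a sized ubc_info and
 * skip the vnode_size() lookup; "vp" and "va_size" are hypothetical.
 */
#if 0 /* example only */
	if (!UBCINFOEXISTS(vp)) {
		(void) ubc_info_init_withsize(vp, (off_t)va_size);
	}
#endif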

/*
 * ubc_info_init_internal
 *
 * Allocate and attach a ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		withfsize{0,1}		Zero if the size should be obtained
 *					from the vnode; otherwise, use filesize
 *		filesize		The size of the file, if withfsize == 1
 *
 * Returns:	0			Success
 *	vnode_size:ENOMEM		Not enough space
 *	vnode_size:???			Other error from vnode_getattr
 *
 * Notes:	We call a blocking zalloc(), and the zone was created as an
 *		expandable and collectable zone, so if no memory is available,
 *		it is possible for zalloc() to block indefinitely. zalloc()
 *		may also panic if the zone of zones is exhausted, since it's
 *		NOT expandable.
 *
 *		We unconditionally call vnode_pager_setup(), even if this is
 *		a reuse of a ubc_info; in that case, we should probably assert
 *		that it does not already have a pager association, but do not.
 *
 *		Since memory_object_create_named() can only fail from receiving
 *		an invalid pager argument, the explicit check and panic is
 *		merely precautionary.
 */
static int
ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
{
	struct ubc_info	*uip;
	void *  pager;
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	/*
	 * If there is not already a ubc_info attached to the vnode, we
	 * attach one; otherwise, we will reuse the one that's there.
	 */
	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	/*
	 * Allocate a pager object for this vnode
	 *
	 * XXX The value of the pager parameter is currently ignored.
	 * XXX Presumably, this API changed to avoid the race between
	 * XXX setting the pager and the UI_HASPAGER flag.
	 */
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	/*
	 * Explicitly set the pager into the ubc_info, after setting the
	 * UI_HASPAGER flag.
	 */
	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We cannot use VNOP_GETATTR() to get an accurate
	 * value of ui_size because this may be an NFS vnode, and
	 * nfs_getattr() can call vinvalbuf(); if this happens,
	 * ubc_info is not set up to deal with that event.
	 * So use a bogus size.
	 */

	/*
	 * create a vnode - vm_object association;
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object. We hold this reference as long as the vnode is
	 * "alive." Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */

	if (withfsize == 0) {
		/* initialize the size */
		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;	/* vnode has a named ubc reference */

	return (error);
}


/*
 * ubc_info_free
 *
 * Free a ubc_info structure
 *
 * Parameters:	uip			A pointer to the ubc_info to free
 *
 * Returns:	(void)
 *
 * Notes:	If there is a credential that has subsequently been associated
 *		with the ubc_info via a call to ubc_setcred(), the reference
 *		to the credential is dropped.
 *
 *		It's actually impossible for a ubc_info.ui_control to take the
 *		value MEMORY_OBJECT_CONTROL_NULL.
 */
static void
ubc_info_free(struct ubc_info *uip)
{
	if (IS_VALID_CRED(uip->ui_ucred)) {
		kauth_cred_unref(&uip->ui_ucred);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);
	ubc_cs_free(uip);

	zfree(ubc_info_zone, uip);
	return;
}


void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}

errno_t mach_to_bsd_errno(kern_return_t mach_err)
{
	switch (mach_err) {
	case KERN_SUCCESS:
		return 0;

	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
	case KERN_NOT_IN_SET:
	case KERN_INVALID_NAME:
	case KERN_INVALID_TASK:
	case KERN_INVALID_RIGHT:
	case KERN_INVALID_VALUE:
	case KERN_INVALID_CAPABILITY:
	case KERN_INVALID_HOST:
	case KERN_MEMORY_PRESENT:
	case KERN_INVALID_PROCESSOR_SET:
	case KERN_INVALID_POLICY:
	case KERN_ALREADY_WAITING:
	case KERN_DEFAULT_SET:
	case KERN_EXCEPTION_PROTECTED:
	case KERN_INVALID_LEDGER:
	case KERN_INVALID_MEMORY_CONTROL:
	case KERN_INVALID_SECURITY:
	case KERN_NOT_DEPRESSED:
	case KERN_LOCK_OWNED:
	case KERN_LOCK_OWNED_SELF:
		return EINVAL;

	case KERN_PROTECTION_FAILURE:
	case KERN_NOT_RECEIVER:
	case KERN_NO_ACCESS:
	case KERN_POLICY_STATIC:
		return EACCES;

	case KERN_NO_SPACE:
	case KERN_RESOURCE_SHORTAGE:
	case KERN_UREFS_OVERFLOW:
	case KERN_INVALID_OBJECT:
		return ENOMEM;

	case KERN_FAILURE:
		return EIO;

	case KERN_MEMORY_FAILURE:
	case KERN_POLICY_LIMIT:
	case KERN_CODESIGN_ERROR:
		return EPERM;

	case KERN_MEMORY_ERROR:
		return EBUSY;

	case KERN_ALREADY_IN_SET:
	case KERN_NAME_EXISTS:
	case KERN_RIGHT_EXISTS:
		return EEXIST;

	case KERN_ABORTED:
		return EINTR;

	case KERN_TERMINATED:
	case KERN_LOCK_SET_DESTROYED:
	case KERN_LOCK_UNSTABLE:
	case KERN_SEMAPHORE_DESTROYED:
		return ENOENT;

	case KERN_RPC_SERVER_TERMINATED:
		return ECONNRESET;

	case KERN_NOT_SUPPORTED:
		return ENOTSUP;

	case KERN_NODE_DOWN:
		return ENETDOWN;

	case KERN_NOT_WAITING:
		return ENOENT;

	case KERN_OPERATION_TIMED_OUT:
		return ETIMEDOUT;

	default:
		return EIO;
	}
}
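
/*
 * Illustrative sketch (not part of the original source): returning a BSD
 * errno to a caller after a Mach VM call; "control", "start" and "len"
 * are hypothetical.
 */
#if 0 /* example only */
	{
		kern_return_t kret;

		kret = memory_object_lock_request(control, start, len,
						  NULL, NULL, MEMORY_OBJECT_RETURN_NONE,
						  MEMORY_OBJECT_DATA_FLUSH, VM_PROT_NO_CHANGE);
		return mach_to_bsd_errno(kret);	/* 0 when kret == KERN_SUCCESS */
	}
#endif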

/*
 * ubc_setsize_ex
 *
 * Tell the VM that the size of the file represented by the vnode has
 * changed
 *
 * Parameters:	vp	The vp whose backing file size is
 *			being changed
 *		nsize	The new size of the backing file
 *		opts	Options
 *
 * Returns:	EINVAL for new size < 0
 *		ENOENT if no UBC info exists
 *		EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
 *		Other errors (mapped to errno_t) returned by VM functions
 *
 * Notes:	This function will indicate success if the new size is the
 *		same or larger than the old size (in this case, the
 *		remainder of the file will require modification or use of
 *		an existing upl to access successfully).
 *
 *		This function will fail if the new file size is smaller,
 *		and the memory region being invalidated was unable to
 *		actually be invalidated and/or the last page could not be
 *		flushed, if the new size is not aligned to a page
 *		boundary.  This is usually indicative of an I/O error.
 */
errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret = KERN_SUCCESS;

	if (nsize < (off_t)0)
		return EINVAL;

	if (!UBCINFOEXISTS(vp))
		return ENOENT;

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;

	if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize)
		return EAGAIN;

	/*
	 * Update the size before flushing the VM
	 */
	uip->ui_size = nsize;

	if (nsize >= osize) {	/* Nothing more to do */
		if (nsize > osize) {
			lock_vnode_and_post(vp, NOTE_EXTEND);
		}

		return 0;
	}

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize, so any
	 * subsequent page-in will zero-fill the tail properly
	 */
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	if (lastoff) {
		upl_t upl;
		upl_page_info_t	*pl;

		/*
		 * new EOF ends up in the middle of a page;
		 * zero the tail of this page if it's currently
		 * present in the cache
		 */
		kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE, VM_KERN_MEMORY_FILE);

		if (kret != KERN_SUCCESS)
			panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);

		if (upl_valid_page(pl, 0))
			cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);

		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);

		lastpg += PAGE_SIZE_64;
	}
	if (olastpgend > lastpg) {
		int flags;

		if (lastpg == 0)
			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
		else
			flags = MEMORY_OBJECT_DATA_FLUSH;
		/*
		 * invalidate the pages beyond the new EOF page
		 *
		 */
		kret = memory_object_lock_request(control,
						  (memory_object_offset_t)lastpg,
						  (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
						  MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	}
	return mach_to_bsd_errno(kret);
}

// Returns true for success
int ubc_setsize(vnode_t vp, off_t nsize)
{
	return ubc_setsize_ex(vp, nsize, 0) == 0;
}
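
/*
 * Illustrative sketch (not part of the original source): a truncate path
 * telling the UBC about the new length without re-entering the FS;
 * "vp" and "new_len" are hypothetical.
 */
#if 0 /* example only */
	{
		errno_t err;

		err = ubc_setsize_ex(vp, new_len, UBC_SETSIZE_NO_FS_REENTRY);
		if (err == EAGAIN) {
			/* shrinking would call back into the FS; defer it */
		}
	}
#endif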

/*
 * ubc_getsize
 *
 * Get the size of the file associated with the specified vnode
 *
 * Parameters:	vp			The vnode whose size is of interest
 *
 * Returns:	0			There is no ubc_info associated with
 *					this vnode, or the size is zero
 *		!0			The size of the file
 *
 * Notes:	Using this routine, it is not possible for a caller to
 *		successfully distinguish between a vnode associated with a zero
 *		length file, and a vnode with no associated ubc_info. The
 *		caller therefore needs to not care, or needs to ensure that
 *		they have previously successfully called ubc_info_init() or
 *		ubc_info_init_withsize().
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/* people depend on the side effect of this working this way
	 * as they call this for directories
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}


/*
 * ubc_umount
 *
 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
 * mount point
 *
 * Parameters:	mp			The mount point
 *
 * Returns:	0			Success
 *
 * Notes:	There is no failure indication for this function.
 *
 *		This function is used in the unmount path; since it may block
 *		I/O indefinitely, it should not be used in the forced unmount
 *		path, since a device unavailability could also block that
 *		indefinitely.
 *
 *		Because there is no device ejection interlock on USB, FireWire,
 *		or similar devices, it's possible that an ejection that begins
 *		subsequent to the vnode_iterate() completing, either on one of
 *		those devices, or a network mount for which the server quits
 *		responding, etc., may cause the caller to block indefinitely.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return(0);
}


/*
 * ubc_umcallback
 *
 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
 * and vnode_iterate() for details of implementation.
 */
static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

	if (UBCINFOEXISTS(vp)) {

		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}


/*
 * ubc_getcred
 *
 * Get the credentials currently active for the ubc_info associated with the
 * vnode.
 *
 * Parameters:	vp			The vnode whose ubc_info credentials
 *					are to be retrieved
 *
 * Returns:	!NOCRED			The credentials
 *		NOCRED			If there is no ubc_info for the vnode,
 *					or if there is one, but it has not had
 *					any credentials associated with it via
 *					a call to ubc_setcred()
 */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}


/*
 * ubc_setthreadcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the supplied thread; otherwise leave
 * them alone.
 *
 * Parameters:	vp			The vnode whose ubc_info creds are to
 *					be set
 *		p			The process whose credentials are to
 *					be used, if not running on an assumed
 *					credential
 *		thread			The thread whose credentials are to
 *					be used
 *
 * Returns:	1			This vnode has no associated ubc_info
 *		0			Success
 *
 * Notes:	This function takes a proc parameter to account for bootstrap
 *		issues where a task or thread may call this routine, either
 *		before credentials have been initialized by bsd_init(), or if
 *		there is no BSD info associated with a mach thread yet. This
 *		is known to happen in both the initial swap and memory mapping
 *		calls.
 *
 *		This function is generally used only in the following cases:
 *
 *		o	a memory mapped file via the mmap() system call
 *		o	a swap store backing file
 *		o	subsequent to a successful write via vn_write()
 *
 *		The information is then used by the NFS client in order to
 *		cons up a wire message in either the page-in or page-out path.
 *
 *		There are two potential problems with the use of this API:
 *
 *		o	Because the write path only set it on a successful
 *			write, there is a race window between setting the
 *			credential and its use to evict the pages to the
 *			remote file server
 *
 *		o	Because a page-in may occur prior to a write, the
 *			credential may not be set at this time, if the page-in
 *			is not the result of a mapping established via mmap().
 *
 *		In both these cases, this will be triggered from the paging
 *		path, which will instead use the credential of the current
 *		process, which in this case is either the dynamic_pager or
 *		the kernel task, both of which utilize "root" credentials.
 *
 *		This may potentially permit operations to occur which should
 *		be denied, or it may cause to be denied operations which
 *		should be permitted, depending on the configuration of the NFS
 *		server.
 */
int
ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
{
	struct ubc_info *uip;
	kauth_cred_t credp;
	struct uthread  *uthread = get_bsdthread_info(thread);

	if (!UBCINFOEXISTS(vp))
		return (1);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (!IS_VALID_CRED(credp)) {
		/* use per-thread cred, if assumed identity, else proc cred */
		if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
			uip->ui_ucred = kauth_cred_proc_ref(p);
		} else {
			uip->ui_ucred = uthread->uu_ucred;
			kauth_cred_ref(uip->ui_ucred);
		}
	}
	vnode_unlock(vp);

	return (0);
}


/*
 * ubc_setcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the process; otherwise leave them
 * alone.
 *
 * Parameters:	vp			The vnode whose ubc_info creds are to
 *					be set
 *		p			The process whose credentials are to
 *					be used
 *
 * Returns:	0			This vnode has no associated ubc_info
 *		1			Success
 *
 * Notes:	The return values for this function are inverted from nearly
 *		all other uses in the kernel.
 *
 *		See also ubc_setthreadcred(), above.
 *
 *		This function is considered deprecated, and generally should
 *		not be used, as it is incompatible with per-thread credentials;
 *		it exists for legacy KPI reasons.
 *
 * DEPRECATION:	ubc_setcred() is being deprecated. Please use
 *		ubc_setthreadcred() instead.
 */
int
ubc_setcred(struct vnode *vp, proc_t p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	/* If there is no ubc_info, deny the operation */
	if ( !UBCINFOEXISTS(vp))
		return (0);

	/*
	 * Check to see if there is already a credential reference in the
	 * ubc_info; if there is not, take one on the supplied credential.
	 */
	vnode_lock(vp);
	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;
	if (!IS_VALID_CRED(credp)) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}

/*
 * ubc_getpager
 *
 * Get the pager associated with the ubc_info associated with the vnode.
 *
 * Parameters:	vp			The vnode to obtain the pager from
 *
 * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
 *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
 *
 * Notes:	For each vnode that has a ubc_info associated with it, that
 *		ubc_info SHALL have a pager associated with it, so in the
 *		normal case, it's impossible to return VNODE_PAGER_NULL for
 *		a vnode with an associated ubc_info.
 */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}


/*
 * ubc_getobject
 *
 * Get the memory object control associated with the ubc_info associated with
 * the vnode
 *
 * Parameters:	vp			The vnode to obtain the memory object
 *					from
 *		flags			DEPRECATED
 *
 * Returns:	!MEMORY_OBJECT_CONTROL_NULL
 *		MEMORY_OBJECT_CONTROL_NULL
 *
 * Notes:	Historically, if the flags were not "do not reactivate", this
 *		function would look up the memory object using the pager if
 *		it did not exist (this could be the case if the vnode had
 *		been previously reactivated). The flags would also permit a
 *		hold to be requested, which would have created an object
 *		reference, if one had not already existed. This usage is
 *		deprecated, as it would permit a race between finding and
 *		taking the reference vs. a single reference being dropped in
 *		another thread.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return((vp->v_ubcinfo->ui_control));

	return (MEMORY_OBJECT_CONTROL_NULL);
}

/*
 * ubc_blktooff
 *
 * Convert a given block number to a memory backing object (file) offset for a
 * given vnode
 *
 * Parameters:	vp			The vnode in which the block is located
 *		blkno			The block number to convert
 *
 * Returns:	!-1			The offset into the backing object
 *		-1			There is no ubc_info associated with
 *					the vnode
 *		-1			An error occurred in the underlying VFS
 *					while translating the block to an
 *					offset; the most likely cause is that
 *					the caller specified a block past the
 *					end of the file, but this could also be
 *					any other error from VNOP_BLKTOOFF().
 *
 * Note:	Representing the error in band loses some information, but does
 *		not occlude a valid offset, since an off_t of -1 is normally
 *		used to represent EOF. If we had a more reliable constant in
 *		our header files for it (i.e. explicitly cast to an off_t), we
 *		would use it here instead.
 */
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset = -1;
	int error;

	if (UBCINFOEXISTS(vp)) {
		error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
		if (error)
			file_offset = -1;
	}

	return (file_offset);
}


/*
 * ubc_offtoblk
 *
 * Convert a given offset in a memory backing object into a block number for a
 * given vnode
 *
 * Parameters:	vp			The vnode in which the offset is
 *					located
 *		offset			The offset into the backing object
 *
 * Returns:	!-1			The returned block number
 *		-1			There is no ubc_info associated with
 *					the vnode
 *		-1			An error occurred in the underlying VFS
 *					while translating the offset to a
 *					block; the most likely cause is that
 *					the caller specified an offset past the
 *					end of the file, but this could also be
 *					any other error from VNOP_OFFTOBLK().
 *
 * Note:	Representing the error in band loses some information, but does
 *		not occlude a valid block number, since block numbers exceed
 *		the valid range for offsets, due to their relative sizes. If
 *		we had a more reliable constant than -1 in our header files
 *		for it (i.e. explicitly cast to a daddr64_t), we would use it
 *		here instead.
 */
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno = -1;
	int error = 0;

	if (UBCINFOEXISTS(vp)) {
		error = VNOP_OFFTOBLK(vp, offset, &blkno);
		if (error)
			blkno = -1;
	}

	return (blkno);
}

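/*
 * Illustrative sketch (not part of the original source): round-tripping
 * between a file offset and an FS block number; "vp" and "off" are
 * hypothetical.
 */
#if 0 /* example only */
	{
		daddr64_t blkno = ubc_offtoblk(vp, off);

		if (blkno != -1 && ubc_blktooff(vp, blkno) != -1) {
			/* both translations succeeded */
		}
	}
#endif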

/*
 * ubc_pages_resident
 *
 * Determine whether or not a given vnode has pages resident via the memory
 * object control associated with the ubc_info associated with the vnode
 *
 * Parameters:	vp			The vnode we want to know about
 *
 * Returns:	1			Yes
 *		0			No
 */
int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t kret;
	boolean_t has_pages_resident;

	if (!UBCINFOEXISTS(vp))
		return (0);

	/*
	 * The following call may fail if an invalid ui_control is specified,
	 * or if there is no VM object associated with the control object. In
	 * either case, reacting to it as if there were no pages resident will
	 * result in correct behavior.
	 */
	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}

/*
 * ubc_msync
 *
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:	vp			The vnode whose associated ubc_info's
 *					associated memory object is to have a
 *					range invalidated within it
 *		beg_off			The start of the range, as an offset
 *		end_off			The end of the range, as an offset
 *		resid_off		The address of an off_t supplied by the
 *					caller; may be set to NULL to ignore
 *		flags			See ubc_msync_internal()
 *
 * Returns:	0			Success
 *		!0			Failure; an errno is returned
 *
 * Implicit Returns:
 *		*resid_off, modified	If non-NULL, the contents are ALWAYS
 *					modified; they are initialized to the
 *					beg_off, and in case of an I/O error,
 *					the difference between beg_off and the
 *					current value will reflect what was
 *					able to be written before the error
 *					occurred. If no error is returned, the
 *					value of the resid_off is undefined; do
 *					NOT use it in place of end_off if you
 *					intend to increment from the end of the
 *					last call and call iteratively.
 *
 * Notes:	see ubc_msync_internal() for more detailed information.
 *
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}
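
/*
 * Illustrative sketch (not part of the original source): pushing every
 * dirty page of a file to its pager and waiting for the I/O; "vp" is
 * assumed to be a valid, referenced vnode.
 */
#if 0 /* example only */
	{
		errno_t err;

		err = ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL,
				UBC_PUSHDIRTY | UBC_SYNC);
		if (err)
			printf("ubc_msync failed: %d\n", err);
	}
#endif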
0b4e3aa0 1637
1c79356b 1638
1c79356b 1639/*
fe8ab488
A
1640 * ubc_msync_internal
1641 *
2d21ac55
A
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:	vp			The vnode whose associated ubc_info's
 *					associated memory object is to have a
 *					range invalidated within it
 *		beg_off			The start of the range, as an offset
 *		end_off			The end of the range, as an offset
 *		resid_off		The address of an off_t supplied by the
 *					caller; may be set to NULL to ignore
 *		flags			MUST contain at least one of the flags
 *					UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *					UBC_PUSHALL; if UBC_PUSHDIRTY is used,
 *					UBC_SYNC may also be specified to cause
 *					this function to block until the
 *					operation is complete.  The behavior
 *					of UBC_SYNC is otherwise undefined.
 *		io_errno		The address of an int to contain the
 *					errno from a failed I/O operation, if
 *					one occurs; may be set to NULL to
 *					ignore
 *
 * Returns:	1			Success
 *		0			Failure
 *
 * Implicit Returns:
 *		*resid_off, modified	The contents of this offset MAY be
 *					modified; in case of an I/O error, the
 *					difference between beg_off and the
 *					current value will reflect what was
 *					able to be written before the error
 *					occurred.
 *		*io_errno, modified	The contents of this offset are set to
 *					an errno, if an error occurs; if the
 *					caller supplies an io_errno parameter,
 *					they should be careful to initialize it
 *					to 0 before calling this function to
 *					enable them to distinguish an error
 *					with a valid *resid_off from an invalid
 *					one, and to avoid potentially falsely
 *					reporting an error, depending on use.
 *
 * Notes:	If there is no ubc_info associated with the vnode supplied,
 *		this function immediately returns success.
 *
 *		If the value of end_off is less than or equal to beg_off, this
 *		function immediately returns success; that is, end_off is NOT
 *		inclusive.
 *
 *		IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *		UBC_PUSHALL MUST be specified; that is, it is NOT possible to
 *		attempt to block on in-progress I/O by calling this function
 *		with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
 *		in order to block pending on the I/O already in progress.
 *
 *		The start offset is truncated to the page boundary and the
 *		size is adjusted to include the last page in the range; that
 *		is, end_off on exactly a page boundary will not change if it
 *		is rounded, and the range of bytes written will be from the
 *		truncated beg_off to the rounded (end_off - 1).
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t	tsize;
	kern_return_t		kret;
	int			request_flags = 0;
	int			flush_flags = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);
	if (end_off <= beg_off)
		return (1);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both
		 * dirty and precious) to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize   = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
					  beg_off, tsize,
					  (memory_object_offset_t *)resid_off,
					  io_errno, flush_flags, request_flags,
					  VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
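/*
 * Illustrative sketch (not part of the original source): pushing all dirty
 * pages in a hypothetical 10-page range synchronously through the internal
 * entry point above, and reporting a failed I/O.  The range and the printf
 * reporting are assumptions chosen for illustration only.
 */
#if 0
static void
ubc_msync_example(vnode_t vp)
{
	off_t	resid_off = 0;
	int	io_errno = 0;	/* initialize to 0 so a real error is distinguishable */

	if (ubc_msync_internal(vp, 0, (off_t)10 * PAGE_SIZE_64, &resid_off,
			       UBC_PUSHDIRTY | UBC_SYNC, &io_errno) == 0) {
		printf("ubc_msync_example: failed, io_errno=%d resid_off=%lld\n",
		       io_errno, resid_off);
	}
}
#endif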


/*
 * ubc_map
 *
 * Explicitly map a vnode that has an associated ubc_info, and add a reference
 * to it for the ubc system, if there isn't one already, so it will not be
 * recycled while it's in use, and set flags on the ubc_info to indicate that
 * we have done this
 *
 * Parameters:	vp			The vnode to map
 *		flags			The mapping flags for the vnode; this
 *					will be a combination of one or more of
 *					PROT_READ, PROT_WRITE, and PROT_EXEC
 *
 * Returns:	0			Success
 *		EPERM			Permission was denied
 *
 * Notes:	An I/O reference on the vnode must already be held on entry
 *
 *		If there is no ubc_info associated with the vnode, this function
 *		will return success.
 *
 *		If a permission error occurs, this function will return
 *		failure; all other failures will cause this function to return
 *		success.
 *
 *		IMPORTANT: This is an internal use function, and its symbols
 *		are not exported, hence its error checking is not very robust.
 *		It is primarily used by:
 *
 *		o	mmap(), when mapping a file
 *		o	When mapping a shared file (a shared library in the
 *			shared segment region)
 *		o	When loading a program image during the exec process
 *
 *		...all of these uses ignore the return code, and any fault that
 *		results later because of a failure is handled in the fix-up path
 *		of the fault handler.  The interface exists primarily as a
 *		performance hint.
 *
 *		Given that third party implementation of the type of interfaces
 *		that would use this function, such as alternative executable
 *		formats, etc., are unsupported, this function is not exported
 *		for general use.
 *
 *		The extra reference is held until the VM system unmaps the
 *		vnode from its own context to maintain a vnode reference in
 *		cases like open()/mmap()/close(), which leave the backing
 *		object referenced by a mapped memory region in a process
 *		address space.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	int need_wakeup = 0;

	if (UBCINFOEXISTS(vp)) {

		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
				      PRIBIO, "ubc_map", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);
		vnode_unlock(vp);

		error = VNOP_MMAP(vp, flags, vfs_context_current());

		/*
		 * rdar://problem/22587101 required that we stop propagating
		 * EPERM up the stack. Otherwise, we would have to funnel up
		 * the error at all the call sites for memory_object_map().
		 * The risk is in having to undo the map/object/entry state at
		 * all these call sites. It would also affect more than just mmap()
		 * e.g. vm_remap().
		 *
		 *	if (error != EPERM)
		 *		error = 0;
		 */

		error = 0;

		vnode_lock_spin(vp);

		if (error == 0) {
			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
			if (flags & PROT_WRITE) {
				SET(uip->ui_flags, UI_MAPPEDWRITE);
			}
		}
		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup)
			wakeup(&uip->ui_flags);

		if (need_ref) {
			/*
			 * Make sure we get a ref as we can't unwind from here
			 */
			if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE))
				panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__);
		}
	}
	return (error);
}
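/*
 * Illustrative sketch (not in the original source): the UI_MAPBUSY /
 * UI_MAPWAITING handshake used by ubc_map() and ubc_unmap() above, reduced
 * to its essentials.  Any code that must serialize against in-flight
 * map/unmap activity follows this msleep()/wakeup() pattern on ui_flags.
 */
#if 0
static void
ubc_mapbusy_pattern(vnode_t vp)
{
	struct ubc_info *uip;
	int need_wakeup = 0;

	vnode_lock(vp);
	uip = vp->v_ubcinfo;
	while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
		SET(uip->ui_flags, UI_MAPWAITING);
		(void) msleep(&uip->ui_flags, &vp->v_lock, PRIBIO,
			      "ubc_mapbusy_pattern", NULL);
	}
	SET(uip->ui_flags, UI_MAPBUSY);	/* we now own the map/unmap path */
	vnode_unlock(vp);

	/* ... perform work that must not race with map/unmap ... */

	vnode_lock(vp);
	CLR(uip->ui_flags, UI_MAPBUSY);
	if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
		CLR(uip->ui_flags, UI_MAPWAITING);
		need_wakeup = 1;
	}
	vnode_unlock(vp);

	if (need_wakeup)
		wakeup(&uip->ui_flags);
}
#endif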


/*
 * ubc_destroy_named
 *
 * Destroy the named memory object associated with the ubc_info control object
 * associated with the designated vnode, if there is a ubc_info associated
 * with the vnode, and a control object is associated with it
 *
 * Parameters:	vp			The designated vnode
 *
 * Returns:	(void)
 *
 * Notes:	This function is called on vnode termination for all vnodes,
 *		and must therefore not assume that there is a ubc_info that is
 *		associated with the vnode, nor that there is a control object
 *		associated with the ubc_info.
 *
 *		If all the conditions necessary are present, this function
 *		calls memory_object_destroy(), which will in turn end up
 *		calling ubc_unmap() to release any vnode references that were
 *		established via ubc_map().
 *
 *		IMPORTANT: This is an internal use function that is used
 *		exclusively by the internal use function vclean().
 */
__private_extern__ void
ubc_destroy_named(vnode_t vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	if (UBCINFOEXISTS(vp)) {
		uip = vp->v_ubcinfo;

		/* Terminate the memory object */
		control = ubc_getobject(vp, UBC_HOLDOBJECT);
		if (control != MEMORY_OBJECT_CONTROL_NULL) {
			kret = memory_object_destroy(control, 0);
			if (kret != KERN_SUCCESS)
				panic("ubc_destroy_named: memory_object_destroy failed");
		}
	}
}


/*
 * ubc_isinuse
 *
 * Determine whether or not a vnode is currently in use by ubc at a level in
 * excess of the requested busycount
 *
 * Parameters:	vp			The vnode to check
 *		busycount		The threshold busy count, used to bias
 *					the count usually already held by the
 *					caller to avoid races
 *
 * Returns:	1			The vnode is in use over the threshold
 *		0			The vnode is not in use over the
 *					threshold
 *
 * Notes:	Because the vnode is only held locked while actually checking
 *		the use count, this function only represents a snapshot of the
 *		current state of the vnode.  If more accurate information is
 *		required, an additional busycount should be held by the caller
 *		and a non-zero busycount used.
 *
 *		If there is no ubc_info associated with the vnode, this
 *		function will report that the vnode is not in use by ubc.
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if ( !UBCINFOEXISTS(vp))
		return (0);
	return(ubc_isinuse_locked(vp, busycount, 0));
}


/*
 * ubc_isinuse_locked
 *
 * Determine whether or not a vnode is currently in use by ubc at a level in
 * excess of the requested busycount
 *
 * Parameters:	vp			The vnode to check
 *		busycount		The threshold busy count, used to bias
 *					the count usually already held by the
 *					caller to avoid races
 *		locked			True if the vnode is already locked by
 *					the caller
 *
 * Returns:	1			The vnode is in use over the threshold
 *		0			The vnode is not in use over the
 *					threshold
 *
 * Notes:	If the vnode is not locked on entry, it is locked while
 *		actually checking the use count.  If this is the case, this
 *		function only represents a snapshot of the current state of
 *		the vnode.  If more accurate information is required, the
 *		vnode lock should be held by the caller, otherwise an
 *		additional busycount should be held by the caller and a
 *		non-zero busycount used.
 *
 *		If there is no ubc_info associated with the vnode, this
 *		function will report that the vnode is not in use by ubc.
 */
int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
	int retval = 0;


	if (!locked)
		vnode_lock_spin(vp);

	if ((vp->v_usecount - vp->v_kusecount) > busycount)
		retval = 1;

	if (!locked)
		vnode_unlock(vp);
	return (retval);
}
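/*
 * Illustrative sketch (not in the original source): a caller that already
 * holds one reference of its own passes busycount == 1 so that its own
 * reference does not count against the "in use by ubc" determination, as
 * described in the Notes above.  The helper name is hypothetical.
 */
#if 0
static int
vnode_busy_besides_me(vnode_t vp)
{
	/* returns 1 if someone other than this caller has the vnode busy */
	return ubc_isinuse(vp, 1);
}
#endif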


/*
 * ubc_unmap
 *
 * Reverse the effects of a ubc_map() call for a given vnode
 *
 * Parameters:	vp			vnode to unmap from ubc
 *
 * Returns:	(void)
 *
 * Notes:	This is an internal use function used by vnode_pager_unmap().
 *		It will attempt to obtain a reference on the supplied vnode,
 *		and if it can do so, and there is an associated ubc_info, and
 *		the flags indicate that it was mapped via ubc_map(), then the
 *		flag is cleared, the mapping removed, and the reference taken
 *		by ubc_map() is released.
 *
 *		IMPORTANT: This MUST only be called by the VM
 *		to prevent race conditions.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;
	int need_rele = 0;
	int need_wakeup = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		bool want_fsevent = false;

		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
				      PRIBIO, "ubc_unmap", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			if (ISSET(uip->ui_flags, UI_MAPPEDWRITE))
				want_fsevent = true;

			need_rele = 1;

			/*
			 * We want to clear the mapped flags after we've called
			 * VNOP_MNOMAP to avoid certain races and allow
			 * VNOP_MNOMAP to call ubc_is_mapped_writable.
			 */
		}
		vnode_unlock(vp);

		if (need_rele) {
			vfs_context_t ctx = vfs_context_current();

			(void)VNOP_MNOMAP(vp, ctx);

#if CONFIG_FSE
			/*
			 * Why do we want an fsevent here?  Normally the
			 * content modified fsevent is posted when a file is
			 * closed and only if it's written to via conventional
			 * means.  It's perfectly legal to close a file and
			 * keep your mappings and we don't currently track
			 * whether it was written to via a mapping.
			 * Therefore, we need to post an fsevent here if the
			 * file was mapped writable.  This may result in false
			 * events, i.e. we post a notification when nothing
			 * has really changed.
			 */
			if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
				add_fsevent(FSE_CONTENT_MODIFIED, ctx,
					    FSE_ARG_VNODE, vp,
					    FSE_ARG_DONE);
			}
#endif

			vnode_rele(vp);
		}

		vnode_lock_spin(vp);

		if (need_rele)
			CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);

		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup)
			wakeup(&uip->ui_flags);

	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}


/*
 * ubc_page_op
 *
 * Manipulate individual page state for a vnode with an associated ubc_info
 * with an associated memory object control.
 *
 * Parameters:	vp			The vnode backing the page
 *		f_offset		A file offset interior to the page
 *		ops			The operations to perform, as a bitmap
 *					(see below for more information)
 *		phys_entryp		The address of a ppnum_t; may be NULL
 *					to ignore
 *		flagsp			A pointer to an int to contain flags;
 *					may be NULL to ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated
 *		KERN_INVALID_OBJECT	If UPL_POP_PHYSICAL and the object is
 *					not physically contiguous
 *		KERN_INVALID_OBJECT	If !UPL_POP_PHYSICAL and the object is
 *					physically contiguous
 *		KERN_FAILURE		If the page cannot be looked up
 *
 * Implicit Returns:
 *		*phys_entryp (modified)	If phys_entryp is non-NULL and
 *					UPL_POP_PHYSICAL
 *		*flagsp (modified)	If flagsp is non-NULL and there was
 *					!UPL_POP_PHYSICAL and a KERN_SUCCESS
 *
 * Notes:	For object boundaries, it is considerably more efficient to
 *		ensure that f_offset is in fact on a page boundary, as this
 *		will avoid internal use of the hash table to identify the
 *		page, and would therefore skip a number of early optimizations.
 *		Since this is a page operation anyway, the caller should try
 *		to pass only a page aligned offset because of this.
 *
 *		*flagsp may be modified even if this function fails.  If it is
 *		modified, it will contain the condition of the page before the
 *		requested operation was attempted; these will only include the
 *		bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
 *		UPL_POP_SET, or UPL_POP_CLR bits.
 *
 *		The flags field may contain a specific operation, such as
 *		UPL_POP_PHYSICAL or UPL_POP_DUMP:
 *
 *		o	UPL_POP_PHYSICAL	Fail if not contiguous; if
 *						*phys_entryp and successful, set
 *						*phys_entryp
 *		o	UPL_POP_DUMP		Dump the specified page
 *
 *		Otherwise, it is treated as a bitmap of one or more page
 *		operations to perform on the final memory object; allowable
 *		bit values are:
 *
 *		o	UPL_POP_DIRTY		The page is dirty
 *		o	UPL_POP_PAGEOUT		The page is paged out
 *		o	UPL_POP_PRECIOUS	The page is precious
 *		o	UPL_POP_ABSENT		The page is absent
 *		o	UPL_POP_BUSY		The page is busy
 *
 *		If the page status is only being queried and not modified, then
 *		no other bits should be specified.  However, if it is being
 *		modified, exactly ONE of the following bits should be set:
 *
 *		o	UPL_POP_SET		Set the current bitmap bits
 *		o	UPL_POP_CLR		Clear the current bitmap bits
 *
 *		Thus to effect a combination of setting and clearing, it may be
 *		necessary to call this function twice.  If this is done, the
 *		set should be used before the clear, since clearing may trigger
 *		a wakeup on the destination page, and if the page is backed by
 *		an encrypted swap file, setting will trigger the decryption
 *		needed before the wakeup occurs.
 */
kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}
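/*
 * Illustrative sketch (not in the original source): querying whether a
 * single page is resident and dirty.  Passing a page-aligned offset avoids
 * the hash lookup noted above; treating ops == 0 as a pure query (no
 * UPL_POP_SET/UPL_POP_CLR) is an assumption of this example.
 */
#if 0
static boolean_t
page_is_dirty(vnode_t vp, off_t f_offset)
{
	int flags = 0;

	/* query only: no UPL_POP_SET/UPL_POP_CLR, so page state is unchanged */
	if (ubc_page_op(vp, trunc_page_64(f_offset), 0, NULL, &flags) != KERN_SUCCESS)
		return FALSE;
	return (flags & UPL_POP_DIRTY) ? TRUE : FALSE;
}
#endif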


/*
 * ubc_range_op
 *
 * Manipulate page state for a range of memory for a vnode with an associated
 * ubc_info with an associated memory object control, when page level state is
 * not required to be returned from the call (i.e. there are no phys_entryp or
 * flagsp parameters to this call, and it takes a range which may contain
 * multiple pages, rather than an offset interior to a single page).
 *
 * Parameters:	vp			The vnode backing the page
 *		f_offset_beg		A file offset interior to the start page
 *		f_offset_end		A file offset interior to the end page
 *		ops			The operations to perform, as a bitmap
 *					(see below for more information)
 *		range			The address of an int; may be NULL to
 *					ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated
 *		KERN_INVALID_OBJECT	If the object is physically contiguous
 *
 * Implicit Returns:
 *		*range (modified)	If range is non-NULL, its contents will
 *					be modified to contain the number of
 *					bytes successfully operated upon.
 *
 * Notes:	IMPORTANT: This function cannot be used on a range that
 *		consists of physically contiguous pages.
 *
 *		For object boundaries, it is considerably more efficient to
 *		ensure that f_offset_beg and f_offset_end are in fact on page
 *		boundaries, as this will avoid internal use of the hash table
 *		to identify the page, and would therefore skip a number of
 *		early optimizations.  Since this is an operation on a set of
 *		pages anyway, the caller should try to pass only page aligned
 *		offsets because of this.
 *
 *		*range will be modified only if this function succeeds.
 *
 *		The flags field MUST contain a specific operation; allowable
 *		values are:
 *
 *		o	UPL_ROP_ABSENT	Returns the extent of the range
 *					presented which is absent, starting
 *					with the start address presented
 *
 *		o	UPL_ROP_PRESENT	Returns the extent of the range
 *					presented which is present (resident),
 *					starting with the start address
 *					presented
 *		o	UPL_ROP_DUMP	Dump the pages which are found in the
 *					target object for the target range.
 *
 *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
 *		multiple regions in the range, only the first matching region
 *		is returned.
 */
kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
				       (memory_object_offset_t)f_offset_beg,
				       (memory_object_offset_t)f_offset_end,
				       ops,
				       range));
}
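/*
 * Illustrative sketch (not in the original source): measuring how much of
 * the leading portion of a range is already resident.  Only the first
 * matching region is reported, per the IMPORTANT note above; the helper
 * name is hypothetical.
 */
#if 0
static int
resident_prefix_bytes(vnode_t vp, off_t beg, off_t end)
{
	int bytes = 0;

	if (ubc_range_op(vp, beg, end, UPL_ROP_PRESENT, &bytes) != KERN_SUCCESS)
		return 0;
	return bytes;	/* number of contiguous resident bytes from beg */
}
#endif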


/*
 * ubc_create_upl
 *
 * Given a vnode, cause the population of a portion of the vm_object; based on
 * the nature of the request, the pages returned may contain valid data, or
 * they may be uninitialized.
 *
 * Parameters:	vp			The vnode from which to create the upl
 *		f_offset		The start offset into the backing store
 *					represented by the vnode
 *		bufsize			The size of the upl to create
 *		uplp			Pointer to the upl_t to receive the
 *					created upl; MUST NOT be NULL
 *		plp			Pointer to receive the internal page
 *					list for the created upl; MAY be NULL
 *					to ignore
 *
 * Returns:	KERN_SUCCESS		The requested upl has been created
 *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
 *					multiple of the page size
 *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
 *					the vnode, or there is no memory object
 *					control associated with the ubc_info
 *	memory_object_upl_request:KERN_INVALID_VALUE
 *					The supplied upl_flags argument is
 *					invalid
 * Implicit Returns:
 *		*uplp (modified)
 *		*plp (modified)		If non-NULL, the value of *plp will be
 *					modified to point to the internal page
 *					list; this modification may occur even
 *					if this function is unsuccessful, in
 *					which case the contents may be invalid
 *
 * Note:	If successful, the returned *uplp MUST subsequently be freed
 *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
 *		ubc_upl_abort(), or ubc_upl_abort_range().
 */
kern_return_t
ubc_create_upl_external(
	struct vnode	*vp,
	off_t		f_offset,
	int		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	return (ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt()));
}

kern_return_t
ubc_create_upl_kernel(
	struct vnode	*vp,
	off_t		f_offset,
	int		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags,
	vm_tag_t	tag)
{
	memory_object_control_t	control;
	kern_return_t		kr;

	if (plp != NULL)
		*plp = NULL;
	*uplp = NULL;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (bufsize > MAX_UPL_SIZE_BYTES)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {

		if (uplflags & UPL_UBC_MSYNC) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
				    UPL_SET_INTERNAL | UPL_SET_LITE;

		} else if (uplflags & UPL_UBC_PAGEOUT) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			if (uplflags & UPL_RET_ONLY_DIRTY)
				uplflags |= UPL_NOBLOCK;

			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
				    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
		} else {
			uplflags |= UPL_RET_ONLY_ABSENT |
				    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
				    UPL_SET_INTERNAL | UPL_SET_LITE;

			/*
			 * if the requested size == PAGE_SIZE, we don't want to set
			 * the UPL_NOBLOCK since we may be trying to recover from a
			 * previous partial pagein I/O that occurred because we were low
			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
			 * since we're only asking for a single page, we can block w/o fear
			 * of tying up pages while waiting for more to become available
			 */
			if (bufsize > PAGE_SIZE)
				uplflags |= UPL_NOBLOCK;
		}
	} else {
		uplflags &= ~UPL_FOR_PAGEOUT;

		if (uplflags & UPL_WILL_BE_DUMPED) {
			uplflags &= ~UPL_WILL_BE_DUMPED;
			uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
		} else
			uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	}
	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
	if (kr == KERN_SUCCESS && plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}
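/*
 * Illustrative sketch (not in the original source): the typical create /
 * map / use / unmap / commit lifecycle for a UPL taken against a vnode,
 * per the Note above that every successful upl must be committed or
 * aborted.  The one-page size, UPL_UBC_MSYNC usage, and VM_KERN_MEMORY_FILE
 * tag are assumptions chosen for brevity.
 */
#if 0
static kern_return_t
upl_lifecycle_example(vnode_t vp, off_t f_offset)
{
	upl_t		upl;
	upl_page_info_t	*pl;
	vm_offset_t	kaddr;
	kern_return_t	kr;

	kr = ubc_create_upl_kernel(vp, f_offset, PAGE_SIZE, &upl, &pl,
				   UPL_UBC_MSYNC, VM_KERN_MEMORY_FILE);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = ubc_upl_map(upl, &kaddr);
	if (kr != KERN_SUCCESS) {
		(void) ubc_upl_abort(upl, UPL_ABORT_ERROR);
		return kr;
	}

	/* ... inspect or copy the page contents at kaddr ... */

	(void) ubc_upl_unmap(upl);
	return ubc_upl_commit(upl);	/* also deallocates the upl */
}
#endif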


/*
 * ubc_upl_maxbufsize
 *
 * Return the maximum bufsize ubc_create_upl( ) will take.
 *
 * Parameters:	none
 *
 * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
 */
upl_size_t
ubc_upl_maxbufsize(
	void)
{
	return(MAX_UPL_SIZE_BYTES);
}

/*
 * ubc_upl_map
 *
 * Map the page list associated with the supplied upl into the kernel virtual
 * address space at the virtual address indicated by the dst_addr argument;
 * the entire upl is mapped
 *
 * Parameters:	upl			The upl to map
 *		dst_addr		The address at which to map the upl
 *
 * Returns:	KERN_SUCCESS		The upl has been mapped
 *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
 *		KERN_FAILURE		The upl is already mapped
 *	vm_map_enter:KERN_INVALID_ARGUMENT
 *					A failure code from vm_map_enter() due
 *					to an invalid argument
 */
kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}


/*
 * ubc_upl_unmap
 *
 * Unmap the page list associated with the supplied upl from the kernel virtual
 * address space; the entire upl is unmapped.
 *
 * Parameters:	upl			The upl to unmap
 *
 * Returns:	KERN_SUCCESS		The upl has been unmapped
 *		KERN_FAILURE		The upl is not currently mapped
 *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
 */
kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return(vm_upl_unmap(kernel_map, upl));
}


/*
 * ubc_upl_commit
 *
 * Commit the contents of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *
 * Returns:	KERN_SUCCESS		The upl has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	In practice, the only return value for this function should be
 *		KERN_SUCCESS, unless there has been data structure corruption;
 *		since the upl is deallocated regardless of success or failure,
 *		there's really nothing to do about this other than panic.
 *
 *		IMPORTANT: Use of this function should not be mixed with use of
 *		ubc_upl_commit_range(), due to the unconditional deallocation
 *		by this function.
 */
kern_return_t
ubc_upl_commit(
	upl_t	upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
	upl_deallocate(upl);
	return kr;
}


/*
 * ubc_upl_commit_range
 *
 * Commit the contents of the specified range of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *		offset			The offset into the upl
 *		size			The size of the region to be committed,
 *					starting at the specified offset
 *		flags			commit type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the commit is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The flags argument is a bitmap of flags on the range of pages in
 *		the upl to be committed; allowable flags are:
 *
 *		o	UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
 *							both empty and has been
 *							successfully committed
 *		o	UPL_COMMIT_CLEAR_DIRTY		Clear each page's dirty
 *							bit; will prevent a
 *							later pageout
 *		o	UPL_COMMIT_SET_DIRTY		Set each page's dirty
 *							bit; will cause a later
 *							pageout
 *		o	UPL_COMMIT_INACTIVATE		Clear each page's
 *							reference bit; the page
 *							will not be accessed
 *		o	UPL_COMMIT_ALLOW_ACCESS		Unbusy each page; pages
 *							become busy when an
 *							IOMemoryDescriptor is
 *							mapped or redirected,
 *							and we have to wait for
 *							an IOKit driver
 *
 *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
 *		not be specified by the caller.
 *
 *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
 *		mutually exclusive, and should not be combined.
 */
kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
			      pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);

	if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
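/*
 * Illustrative sketch (not in the original source): committing an entire
 * upl and letting the commit free it, per the note above that
 * UPL_COMMIT_FREE_ON_EMPTY should generally cover the whole upl.  The
 * upl_size parameter is assumed to be the size used at creation time, and
 * clearing the dirty bits is a choice made for this example.
 */
#if 0
static kern_return_t
commit_whole_upl(upl_t upl, upl_size_t upl_size)
{
	return ubc_upl_commit_range(upl, 0, upl_size,
	    UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_CLEAR_DIRTY);
}
#endif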


/*
 * ubc_upl_abort_range
 *
 * Abort the contents of the specified range of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		offset			The offset into the upl
 *		size			The size of the region to be aborted,
 *					starting at the specified offset
 *		abort_flags		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_flags argument is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o	UPL_ABORT_RESTART	The operation must be restarted
 *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
 *		o	UPL_ABORT_ERROR		An I/O error occurred
 *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
 *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
 *		not be specified by the caller.  It is intended to fulfill the
 *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
 *		ubc_upl_commit_range(), but is never referenced internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
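/*
 * Illustrative sketch (not in the original source): the error path of a
 * pager-style operation, throwing away a whole upl after a failed I/O.
 * Dumping the pages on error is an assumption of this example, not a
 * requirement of the interface.
 */
#if 0
static void
abort_whole_upl_on_io_error(upl_t upl, upl_size_t upl_size)
{
	(void) ubc_upl_abort_range(upl, 0, upl_size,
	    UPL_ABORT_ERROR | UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
}
#endif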


/*
 * ubc_upl_abort
 *
 * Abort the contents of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		abort_type		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_type is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o	UPL_ABORT_RESTART	The operation must be restarted
 *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
 *		o	UPL_ABORT_ERROR		An I/O error occurred
 *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
 *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
 *		not be specified by the caller.  It is intended to fulfill the
 *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
 *		ubc_upl_commit_range(), but is never referenced internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}


/*
 * ubc_upl_pageinfo
 *
 * Retrieve the internal page list for the specified upl
 *
 * Parameters:	upl			The upl to obtain the page list from
 *
 * Returns:	!NULL			The (upl_page_info_t *) for the page
 *					list internal to the upl
 *		NULL			Error/no page list associated
 *
 * Notes:	IMPORTANT: The function is only valid on internal objects
 *		where the list request was made with the UPL_INTERNAL flag.
 *
 *		This function is a utility helper function, since some callers
 *		may not have direct access to the header defining the macro,
 *		due to abstraction layering constraints.
 */
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t	upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}


int
UBCINFOEXISTS(const struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}


void
ubc_upl_range_needed(
	upl_t	upl,
	int	index,
	int	count)
{
	upl_range_needed(upl, index, count);
}

boolean_t ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
{
	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED))
		return FALSE;
	if (writable)
		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
	return TRUE;
}

boolean_t ubc_is_mapped_writable(const struct vnode *vp)
{
	boolean_t writable;
	return ubc_is_mapped(vp, &writable) && writable;
}
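/*
 * Illustrative sketch (not in the original source): a filesystem-side check,
 * e.g. from a VNOP_MNOMAP implementation, distinguishing "mapped at all"
 * from "mapped writable".  The printf reporting is an assumption for
 * illustration.
 */
#if 0
static void
report_mapping_state(vnode_t vp)
{
	boolean_t writable = FALSE;

	if (!ubc_is_mapped(vp, &writable))
		printf("vnode %p: not mapped\n", vp);
	else
		printf("vnode %p: mapped%s\n", vp, writable ? " writable" : "");
}
#endif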


/*
 * CODE SIGNING
 */
static volatile SInt32 cs_blob_size = 0;
static volatile SInt32 cs_blob_count = 0;
static SInt32 cs_blob_size_peak = 0;
static UInt32 cs_blob_size_max = 0;
static SInt32 cs_blob_count_peak = 0;

SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");

/*
 * Function: csblob_parse_teamid
 *
 * Description: This function returns a pointer to the team id
 *		stored within the codedirectory of the csblob.
 *		If the codedirectory predates team-ids, it returns
 *		NULL.
 *		This does not copy the name but returns a pointer to
 *		it within the CD.  Subsequently, the CD must be
 *		available when this is used.
 */
static const char *
csblob_parse_teamid(struct cs_blob *csblob)
{
	const CS_CodeDirectory *cd;

	cd = csblob->csb_cd;

	if (ntohl(cd->version) < CS_SUPPORTSTEAMID)
		return NULL;

	if (cd->teamOffset == 0)
		return NULL;

	const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
	if (cs_debug > 1)
		printf("found team-id %s in cdblob\n", name);

	return name;
}


kern_return_t
ubc_cs_blob_allocate(
	vm_offset_t	*blob_addr_p,
	vm_size_t	*blob_size_p)
{
	kern_return_t	kr = KERN_FAILURE;

	{
		*blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);

		if (*blob_addr_p == 0) {
			kr = KERN_NO_SPACE;
		} else {
			kr = KERN_SUCCESS;
		}
	}

	return kr;
}

void
ubc_cs_blob_deallocate(
	vm_offset_t	blob_addr,
	vm_size_t	blob_size)
{
#if PMAP_CS
	if (blob_size > pmap_cs_blob_limit) {
		kmem_free(kernel_map, blob_addr, blob_size);
	} else
#endif
	{
		kfree((void *) blob_addr, blob_size);
	}
}

/*
 * Some codesigned files use a lowest common denominator page size of
 * 4KiB, but can be used on systems that have a runtime page size of
 * 16KiB.  Since faults will only occur on 16KiB ranges in
 * cs_validate_range(), we can convert the original Code Directory to
 * a multi-level scheme where groups of 4 hashes are combined to form
 * a new hash, which represents 16KiB in the on-disk file.  This can
 * reduce the wired memory requirement for the Code Directory by
 * 75%.  Care must be taken for binaries that use the "fourk" VM pager
 * for unaligned access, which may still attempt to validate on
 * non-16KiB multiples for compatibility with 3rd party binaries.
 */
static boolean_t
ubc_cs_supports_multilevel_hash(struct cs_blob *blob)
{
	const CS_CodeDirectory *cd;

	/*
	 * Only applies to binaries that ship as part of the OS,
	 * primarily the shared cache.
	 */
	if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
		return FALSE;
	}

	/*
	 * If the runtime page size matches the code signing page
	 * size, there is no work to do.
	 */
	if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
		return FALSE;
	}

	cd = blob->csb_cd;

	/*
	 * There must be a valid integral multiple of hashes
	 */
	if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
		return FALSE;
	}

	/*
	 * Scatter lists must also have ranges that have an integral number of hashes
	 */
	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {

		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		/* iterate all scatter structs to make sure they are all aligned */
		do {
			uint32_t sbase = ntohl(scatter->base);
			uint32_t scount = ntohl(scatter->count);

			/* last scatter? */
			if (scount == 0) {
				break;
			}

			if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
				return FALSE;
			}

			if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
				return FALSE;
			}

			scatter++;
		} while(1);
	}

	/* Covered range must be a multiple of the new page size */
	if (ntohl(cd->codeLimit) & PAGE_MASK) {
		return FALSE;
	}

	/* All checks pass */
	return TRUE;
}
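/*
 * Illustrative arithmetic (not in the original source) for the conversion
 * described above, on a system with PAGE_SHIFT == 14 (16KiB pages) and a
 * Code Directory using 4KiB code signing pages (csb_hash_pageshift == 12):
 *
 *	hashes_per_new_hash_shift = 14 - 12 = 2, so 1 << 2 = 4 source
 *	hashes are combined into each new hash, and nCodeSlots shrinks by
 *	4x, which is where the ~75% wired memory reduction comes from.
 */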

/*
 * Given a cs_blob with an already chosen best code directory, this
 * function allocates memory and copies into it only the blobs that
 * will be needed by the kernel, namely the single chosen code
 * directory (and not any of its alternatives) and the entitlement
 * blob.
 *
 * This saves significant memory with agile signatures, and additional
 * memory for 3rd Party Code because we also omit the CMS blob.
 *
 * To support multilevel and other potential code directory rewriting,
 * the size of a new code directory can be specified.  Since that code
 * directory will replace the existing code directory,
 * ubc_cs_reconstitute_code_signature does not copy the original code
 * directory when a size is given, and the caller must fill it in.
 */
static int
ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size,
				   vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p,
				   CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p)
{
	const CS_CodeDirectory	*old_cd, *cd;
	CS_CodeDirectory	*new_cd;
	const CS_GenericBlob	*entitlements;
	vm_offset_t		new_blob_addr;
	vm_size_t		new_blob_size;
	vm_size_t		new_cdsize;
	kern_return_t		kr;
	int			error;

	old_cd = blob->csb_cd;

	new_cdsize = optional_new_cd_size != 0 ? optional_new_cd_size : htonl(old_cd->length);

	new_blob_size  = sizeof(CS_SuperBlob);
	new_blob_size += sizeof(CS_BlobIndex);
	new_blob_size += new_cdsize;

	if (blob->csb_entitlements_blob) {
		/* We need to add a slot for the entitlements */
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(blob->csb_entitlements_blob->length);
	}

	kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
	if (kr != KERN_SUCCESS) {
		if (cs_debug > 1) {
			printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
			       kr);
		}
		return ENOMEM;
	}

	CS_SuperBlob *new_superblob;

	new_superblob = (CS_SuperBlob *)new_blob_addr;
	new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
	new_superblob->length = htonl((uint32_t)new_blob_size);
	if (blob->csb_entitlements_blob) {
		vm_size_t ent_offset, cd_offset;

		cd_offset  = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
		ent_offset = cd_offset + new_cdsize;

		new_superblob->count = htonl(2);
		new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
		new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
		new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
		new_superblob->index[1].offset = htonl((uint32_t)ent_offset);

		memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));

		new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
	} else {
		// Blob is the code directory, directly.
		new_cd = (CS_CodeDirectory *)new_blob_addr;
	}

	if (optional_new_cd_size == 0) {
		// Copy code directory, and revalidate.
		memcpy(new_cd, old_cd, new_cdsize);

		vm_size_t length = new_blob_size;

		error = cs_validate_csblob((const uint8_t *)new_blob_addr, length, &cd, &entitlements);

		if (error) {
			printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
			       error);

			ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
			return error;
		}
		*new_entitlements_p = entitlements;
	} else {
		// Caller will fill out and validate code directory.
		memset(new_cd, 0, new_cdsize);
		*new_entitlements_p = NULL;
	}

	*new_blob_addr_p = new_blob_addr;
	*new_blob_size_p = new_blob_size;
	*new_cd_p = new_cd;

	return 0;
}

static int
ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
{
	const CS_CodeDirectory	*old_cd, *cd;
	CS_CodeDirectory	*new_cd;
	const CS_GenericBlob	*entitlements;
	vm_offset_t		new_blob_addr;
	vm_size_t		new_blob_size;
	vm_size_t		new_cdsize;
	int			error;

	uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);

	if (cs_debug > 1) {
		printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
		       (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
	}

	old_cd = blob->csb_cd;

	/* Up to the hashes, we can copy all data */
	new_cdsize  = ntohl(old_cd->hashOffset);
	new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;

	error = ubc_cs_reconstitute_code_signature(blob, new_cdsize,
						   &new_blob_addr, &new_blob_size, &new_cd,
						   &entitlements);
	if (error != 0) {
		printf("CODE SIGNING: Failed to reconstitute code signature: %d\n", error);
		return error;
	}

	memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));

	/* Update fields in the Code Directory structure */
	new_cd->length = htonl((uint32_t)new_cdsize);

	uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
	nCodeSlots >>= hashes_per_new_hash_shift;
	new_cd->nCodeSlots = htonl(nCodeSlots);

	new_cd->pageSize = PAGE_SHIFT; /* Not byte-swapped */

	if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
		SC_Scatter *scatter = (SC_Scatter*)
			((char *)new_cd + ntohl(new_cd->scatterOffset));
		/* iterate all scatter structs to scale their counts */
		do {
			uint32_t scount = ntohl(scatter->count);
			uint32_t sbase  = ntohl(scatter->base);

			/* last scatter? */
			if (scount == 0) {
				break;
			}

			scount >>= hashes_per_new_hash_shift;
			scatter->count = htonl(scount);

			sbase >>= hashes_per_new_hash_shift;
			scatter->base = htonl(sbase);

			scatter++;
		} while(1);
	}

	/* For each group of hashes, hash them together */
	const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
	unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);

	uint32_t hash_index;
	for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
		union cs_hash_union	mdctx;

		uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
		const unsigned char *src = src_base + hash_index * source_hash_len;
		unsigned char *dst = dst_base + hash_index * new_cd->hashSize;

		blob->csb_hashtype->cs_init(&mdctx);
		blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
		blob->csb_hashtype->cs_final(dst, &mdctx);
	}

	error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
	if (error != 0) {

		printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
		       error);

		ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
		return error;
	}

	/* New Code Directory is ready for use, swap it out in the blob structure */
	ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);

	blob->csb_mem_size = new_blob_size;
	blob->csb_mem_kaddr = new_blob_addr;
	blob->csb_cd = cd;
	blob->csb_entitlements_blob = entitlements;

	/* The blob has some cached attributes of the Code Directory, so update those */

	blob->csb_hash_firstlevel_pagesize = blob->csb_hash_pagesize; /* Save the original page size */

	blob->csb_hash_pagesize = PAGE_SIZE;
	blob->csb_hash_pagemask = PAGE_MASK;
	blob->csb_hash_pageshift = PAGE_SHIFT;
	blob->csb_end_offset = ntohl(cd->codeLimit);
	if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
	} else {
		blob->csb_start_offset = 0;
	}

	return 0;
}

/*
 * Validate the code signature blob, create a struct cs_blob wrapper
 * and return it together with a pointer to the chosen code directory
 * and entitlements blob.
 *
 * Note that this takes ownership of the memory at addr, mainly because
 * this function can actually replace the passed in blob with another
 * one, e.g. when performing multilevel hashing optimization.
 */
int
cs_blob_create_validated(
	vm_address_t * const		addr,
	vm_size_t			size,
	struct cs_blob ** const		ret_blob,
	CS_CodeDirectory const ** const	ret_cd)
{
	struct cs_blob		*blob;
	int			error = EINVAL;
	const CS_CodeDirectory	*cd;
	const CS_GenericBlob	*entitlements;
	union cs_hash_union	mdctx;
	size_t			length;

	if (ret_blob)
		*ret_blob = NULL;

	blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
	if (blob == NULL) {
		return ENOMEM;
	}

	/* fill in the new blob */
	blob->csb_mem_size = size;
	blob->csb_mem_offset = 0;
	blob->csb_mem_kaddr = *addr;
	blob->csb_flags = 0;
	blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
	blob->csb_platform_binary = 0;
	blob->csb_platform_path = 0;
	blob->csb_teamid = NULL;
	blob->csb_entitlements_blob = NULL;
	blob->csb_entitlements = NULL;
	blob->csb_reconstituted = false;

	/* Transfer ownership. Even on error, this function will deallocate */
	*addr = 0;

	/*
	 * Validate the blob's contents
	 */
	length = (size_t) size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
				   length, &cd, &entitlements);
	if (error) {

		if (cs_debug)
			printf("CODESIGNING: csblob invalid: %d\n", error);
		/*
		 * The vnode checker can't make the rest of this function
		 * succeed if csblob validation failed, so bail */
		goto out;

	} else {
		const unsigned char *md_base;
		uint8_t hash[CS_HASH_MAX_SIZE];
		int md_size;

		blob->csb_cd = cd;
		blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
		blob->csb_hashtype = cs_find_md(cd->hashType);
		if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash))
			panic("validated CodeDirectory but unsupported type");

		blob->csb_hash_pageshift = cd->pageSize;
		blob->csb_hash_pagesize = (1U << cd->pageSize);
		blob->csb_hash_pagemask = blob->csb_hash_pagesize - 1;
		blob->csb_hash_firstlevel_pagesize = 0;
		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
		blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask));
		if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
			const SC_Scatter *scatter = (const SC_Scatter*)
				((const char*)cd + ntohl(cd->scatterOffset));
			blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize;
		} else {
			blob->csb_start_offset = 0;
		}
		/* compute the blob's cdhash */
		md_base = (const unsigned char *) cd;
		md_size = ntohl(cd->length);

		blob->csb_hashtype->cs_init(&mdctx);
		blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
		blob->csb_hashtype->cs_final(hash, &mdctx);

		memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
	}

	error = 0;

out:
	if (error != 0) {
		cs_blob_free(blob);
		blob = NULL;
		cd = NULL;
	}

	if (ret_blob != NULL) {
		*ret_blob = blob;
	}
	if (ret_cd != NULL) {
		*ret_cd = cd;
	}

	return error;
}

/*
 * Free a cs_blob previously created by cs_blob_create_validated.
 */
void
cs_blob_free(
	struct cs_blob * const blob)
{
	if (blob != NULL) {
		if (blob->csb_mem_kaddr) {
			ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
			blob->csb_mem_kaddr = 0;
		}
		if (blob->csb_entitlements != NULL) {
			osobject_release(blob->csb_entitlements);
			blob->csb_entitlements = NULL;
		}
		kfree(blob, sizeof (*blob));
	}
}
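/*
 * Illustrative sketch (not in the original source): the ownership contract
 * of cs_blob_create_validated().  The caller's address is zeroed on entry,
 * so on success the memory must be released through cs_blob_free(), never
 * through the original allocation; blob_mem/blob_size and the helper name
 * are hypothetical.
 */
#if 0
static int
wrap_signature(vm_address_t blob_mem, vm_size_t blob_size)
{
	struct cs_blob *blob = NULL;
	CS_CodeDirectory const *cd = NULL;
	int error;

	/* on return, blob_mem is 0: the blob owns (or has freed) the memory */
	error = cs_blob_create_validated(&blob_mem, blob_size, &blob, &cd);
	if (error)
		return error;	/* memory already deallocated on error */

	/* ... use blob->csb_cdhash, cd, etc. ... */

	cs_blob_free(blob);
	return 0;
}
#endif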
3298
3299int
3300ubc_cs_blob_add(
3301 struct vnode *vp,
3302 cpu_type_t cputype,
3303 off_t base_offset,
3304 vm_address_t *addr,
3305 vm_size_t size,
3306 struct image_params *imgp,
3307 __unused int flags,
3308 struct cs_blob **ret_blob)
3309{
3310 kern_return_t kr;
3311 struct ubc_info *uip;
3312 struct cs_blob *blob, *oblob;
3313 int error;
3314 CS_CodeDirectory const *cd;
3315 off_t blob_start_offset, blob_end_offset;
3316 boolean_t record_mtime;
3317
3318 record_mtime = FALSE;
3319 if (ret_blob)
3320 *ret_blob = NULL;
3321
3322 /* Create the struct cs_blob wrapper that will be attached to the vnode.
3323 * Validates the passed in blob in the process. */
3324 error = cs_blob_create_validated(addr, size, &blob, &cd);
3325
3326 if (error != 0) {
3327 printf("malform code signature blob: %d\n", error);
3328 return error;
3329 }
3330
3331 blob->csb_cpu_type = cputype;
3332 blob->csb_base_offset = base_offset;
3333
3334 /*
593a1d5f
A
3335 * Let policy module check whether the blob's signature is accepted.
3336 */
3337#if CONFIG_MACF
39037602 3338 unsigned int cs_flags = blob->csb_flags;
5ba3f43e
A
3339 unsigned int signer_type = blob->csb_signer_type;
3340 error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
39037602 3341 blob->csb_flags = cs_flags;
5ba3f43e 3342 blob->csb_signer_type = signer_type;
39037602 3343
fe8ab488
A
3344 if (error) {
3345 if (cs_debug)
3346 printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
593a1d5f 3347 goto out;
fe8ab488 3348 }
39037602 3349 if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
c18c124e
A
3350 if (cs_debug)
3351 printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
3352 error = EPERM;
3353 goto out;
3354 }
5ba3f43e
A
3355#endif
3356
d9a64523
A
3357#if CONFIG_ENFORCE_SIGNED_CODE
3358 /*
3359 * Reconstitute code signature
3360 */
3361 {
3362 vm_address_t new_mem_kaddr = 0;
3363 vm_size_t new_mem_size = 0;
3364
3365 CS_CodeDirectory *new_cd = NULL;
3366 CS_GenericBlob const *new_entitlements = NULL;
3367
3368 error = ubc_cs_reconstitute_code_signature(blob, 0,
3369 &new_mem_kaddr, &new_mem_size,
3370 &new_cd, &new_entitlements);
3371
3372 if (error != 0) {
3373 printf("failed code signature reconstitution: %d\n", error);
3374 goto out;
3375 }
3376
3377 ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
3378
3379 blob->csb_mem_kaddr = new_mem_kaddr;
3380 blob->csb_mem_size = new_mem_size;
3381 blob->csb_cd = new_cd;
3382 blob->csb_entitlements_blob = new_entitlements;
3383 blob->csb_reconstituted = true;
3384 }
3385
3386#endif
3387
3388
39037602 3389 if (blob->csb_flags & CS_PLATFORM_BINARY) {
fe8ab488
A
3390 if (cs_debug > 1)
3391 printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
3392 blob->csb_platform_binary = 1;
39037602 3393 blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
fe8ab488
A
3394 } else {
3395 blob->csb_platform_binary = 0;
3e170ce0
A
3396 blob->csb_platform_path = 0;
3397 blob->csb_teamid = csblob_parse_teamid(blob);
fe8ab488
A
3398 if (cs_debug > 1) {
3399 if (blob->csb_teamid)
3400 printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
3401 else
3402 printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
3403 }
3404 }
39037602 3405
2d21ac55
A
3406 /*
3407 * Validate the blob's coverage
3408 */
3409 blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
3410 blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
3411
cf7d32b8
A
3412 if (blob_start_offset >= blob_end_offset ||
3413 blob_start_offset < 0 ||
3414 blob_end_offset <= 0) {
2d21ac55
A
3415 /* reject empty or backwards blob */
3416 error = EINVAL;
3417 goto out;
3418 }
3419
39037602 3420 if (ubc_cs_supports_multilevel_hash(blob)) {
d9a64523
A
3421 error = ubc_cs_convert_to_multilevel_hash(blob);
3422 if (error != 0) {
3423 printf("failed multilevel hash conversion: %d\n", error);
3424 goto out;
3425 }
3426 blob->csb_reconstituted = true;
39037602
A
3427 }
3428
2d21ac55
A
3429 vnode_lock(vp);
3430 if (! UBCINFOEXISTS(vp)) {
3431 vnode_unlock(vp);
3432 error = ENOENT;
3433 goto out;
3434 }
3435 uip = vp->v_ubcinfo;
3436
3437 /* check if this new blob overlaps with an existing blob */
3438 for (oblob = uip->cs_blobs;
3439 oblob != NULL;
3440 oblob = oblob->csb_next) {
3441 off_t oblob_start_offset, oblob_end_offset;
3442
5ba3f43e
A
3443 if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
3444 vnode_unlock(vp);
3445 error = EALREADY;
3446 goto out;
3447 } else if (blob->csb_platform_binary) { // platform binary needs to be the same for app slices
fe8ab488
A
3448 if (!oblob->csb_platform_binary) {
3449 vnode_unlock(vp);
3450 error = EALREADY;
3451 goto out;
3452 }
3453 } else if (blob->csb_teamid) { // teamid binary needs to be the same for app slices
3454 if (oblob->csb_platform_binary ||
3455 oblob->csb_teamid == NULL ||
3456 strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
3457 vnode_unlock(vp);
3458 error = EALREADY;
3459 goto out;
3460 }
3461 } else { // non-teamid binary needs to be the same for app slices
3462 if (oblob->csb_platform_binary ||
3463 oblob->csb_teamid != NULL) {
3464 vnode_unlock(vp);
3465 error = EALREADY;
3466 goto out;
3467 }
3468 }
3469
2d21ac55
A
3470 oblob_start_offset = (oblob->csb_base_offset +
3471 oblob->csb_start_offset);
3472 oblob_end_offset = (oblob->csb_base_offset +
3473 oblob->csb_end_offset);
3474 if (blob_start_offset >= oblob_end_offset ||
3475 blob_end_offset <= oblob_start_offset) {
3476 /* no conflict with this existing blob */
3477 } else {
3478 /* conflict ! */
3479 if (blob_start_offset == oblob_start_offset &&
3480 blob_end_offset == oblob_end_offset &&
3481 blob->csb_mem_size == oblob->csb_mem_size &&
3482 blob->csb_flags == oblob->csb_flags &&
3483 (blob->csb_cpu_type == CPU_TYPE_ANY ||
3484 oblob->csb_cpu_type == CPU_TYPE_ANY ||
3485 blob->csb_cpu_type == oblob->csb_cpu_type) &&
3e170ce0
A
3486 !bcmp(blob->csb_cdhash,
3487 oblob->csb_cdhash,
3488 CS_CDHASH_LEN)) {
2d21ac55
A
3489 /*
3490 * We already have this blob:
3491 * we'll return success but
3492 * throw away the new blob.
3493 */
3494 if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
3495 /*
3496 * The old blob matches this one
3497 * but doesn't have any CPU type.
3498 * Update it with whatever the caller
3499 * provided this time.
3500 */
3501 oblob->csb_cpu_type = cputype;
3502 }
d9a64523
A
3503
3504 /* The signature is still accepted, so update the
3505 * generation count. */
3506 uip->cs_add_gen = cs_blob_generation_count;
3507
2d21ac55 3508 vnode_unlock(vp);
3e170ce0
A
3509 if (ret_blob)
3510 *ret_blob = oblob;
2d21ac55
A
3511 error = EAGAIN;
3512 goto out;
3513 } else {
3514 /* different blob: reject the new one */
3515 vnode_unlock(vp);
3516 error = EALREADY;
3517 goto out;
3518 }
3519 }
3520
3521 }
3522
fe8ab488 3523
2d21ac55
A
3524 /* mark this vnode's VM object as having "signed pages" */
3525 kr = memory_object_signed(uip->ui_control, TRUE);
3526 if (kr != KERN_SUCCESS) {
3527 vnode_unlock(vp);
3528 error = ENOENT;
3529 goto out;
3530 }
3531
15129b1c
A
3532 if (uip->cs_blobs == NULL) {
3533 /* loading 1st blob: record the file's current "modify time" */
3534 record_mtime = TRUE;
3535 }
3536
fe8ab488
A
3537 /* set the generation count for cs_blobs */
3538 uip->cs_add_gen = cs_blob_generation_count;
3539
2d21ac55
A
3540 /*
3541 * Add this blob to the list of blobs for this vnode.
3542 * We always add at the front of the list and we never remove a
3543 * blob from the list, so ubc_cs_get_blobs() can return whatever
3544 * the top of the list was and that list will remain valid
3545 * while we validate a page, even after we release the vnode's lock.
3546 */
3547 blob->csb_next = uip->cs_blobs;
3548 uip->cs_blobs = blob;
3549
3550 OSAddAtomic(+1, &cs_blob_count);
3551 if (cs_blob_count > cs_blob_count_peak) {
3552 cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
3553 }
b0d623f7
A
3554 OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);
3555 if ((SInt32) cs_blob_size > cs_blob_size_peak) {
3556 cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
2d21ac55 3557 }
b0d623f7
A
3558 if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
3559 cs_blob_size_max = (UInt32) blob->csb_mem_size;
2d21ac55
A
3560 }
3561
c331a0be 3562 if (cs_debug > 1) {
2d21ac55 3563 proc_t p;
39236c6e 3564 const char *name = vnode_getname_printable(vp);
2d21ac55
A
3565 p = current_proc();
3566 printf("CODE SIGNING: proc %d(%s) "
3567 "loaded %s signatures for file (%s) "
3568 "range 0x%llx:0x%llx flags 0x%x\n",
3569 p->p_pid, p->p_comm,
3570 blob->csb_cpu_type == -1 ? "detached" : "embedded",
39236c6e 3571 name,
2d21ac55
A
3572 blob->csb_base_offset + blob->csb_start_offset,
3573 blob->csb_base_offset + blob->csb_end_offset,
3574 blob->csb_flags);
39236c6e 3575 vnode_putname_printable(name);
2d21ac55
A
3576 }
3577
2d21ac55
A
3578 vnode_unlock(vp);
3579
15129b1c
A
3580 if (record_mtime) {
3581 vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
3582 }
3583
3e170ce0
A
3584 if (ret_blob)
3585 *ret_blob = blob;
3586
2d21ac55
A
3587 error = 0; /* success ! */
3588
3589out:
3590 if (error) {
fe8ab488
A
3591 if (cs_debug)
3592 printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);
3593
d9a64523 3594 cs_blob_free(blob);
2d21ac55
A
3595 }
3596
3597 if (error == EAGAIN) {
3598 /*
3599 * See above: error is EAGAIN if we were asked
3600 * to add an existing blob again. We already freed the new
3601 * blob and want to return success.
3602 */
3603 error = 0;
2d21ac55
A
3604 }
3605
3606 return error;
91447636
A
3607}
3608
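/*
 * Usage sketch, assuming the caller has already copied a detached
 * signature into a kernel buffer (the helper name and buffer-ownership
 * details are illustrative, not this file's API).  CPU_TYPE_ANY (-1) is
 * what the debug printf above reports as a "detached" signature.
 */
#if 0
static int
attach_detached_signature(struct vnode *vp, vm_address_t kbuf, vm_size_t ksize)
{
	struct cs_blob *blob = NULL;

	/* base_offset 0: the signature covers the file from the start */
	return ubc_cs_blob_add(vp, CPU_TYPE_ANY, (off_t)0,
	    &kbuf, ksize, NULL /* imgp */, 0 /* flags */, &blob);
}
#endif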
3e170ce0
A
3609void
3610csvnode_print_debug(struct vnode *vp)
3611{
3612 const char *name = NULL;
3613 struct ubc_info *uip;
3614 struct cs_blob *blob;
3615
3616 name = vnode_getname_printable(vp);
3617 if (name) {
3618 printf("csvnode: name: %s\n", name);
3619 vnode_putname_printable(name);
3620 }
3621
3622 vnode_lock_spin(vp);
3623
3624 if (! UBCINFOEXISTS(vp)) {
3625 blob = NULL;
3626 goto out;
3627 }
3628
3629 uip = vp->v_ubcinfo;
3630 for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
3631 printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
3632 (unsigned long)blob->csb_start_offset,
3633 (unsigned long)blob->csb_end_offset,
3634 blob->csb_flags,
3635 blob->csb_platform_binary ? "yes" : "no",
3636 blob->csb_platform_path ? "yes" : "no",
3637 blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
3638 }
3639
3640out:
3641 vnode_unlock(vp);
3642
3643}
3644
2d21ac55
A
3645struct cs_blob *
3646ubc_cs_blob_get(
3647 struct vnode *vp,
3648 cpu_type_t cputype,
3649 off_t offset)
91447636 3650{
2d21ac55
A
3651 struct ubc_info *uip;
3652 struct cs_blob *blob;
3653 off_t offset_in_blob;
3654
3655 vnode_lock_spin(vp);
3656
3657 if (! UBCINFOEXISTS(vp)) {
3658 blob = NULL;
3659 goto out;
3660 }
3661
3662 uip = vp->v_ubcinfo;
3663 for (blob = uip->cs_blobs;
3664 blob != NULL;
3665 blob = blob->csb_next) {
3666 if (cputype != -1 && blob->csb_cpu_type == cputype) {
3667 break;
3668 }
3669 if (offset != -1) {
3670 offset_in_blob = offset - blob->csb_base_offset;
3671 if (offset_in_blob >= blob->csb_start_offset &&
3672 offset_in_blob < blob->csb_end_offset) {
3673 /* our offset is covered by this blob */
3674 break;
3675 }
3676 }
3677 }
3678
3679out:
3680 vnode_unlock(vp);
3681
3682 return blob;
91447636 3683}
2d21ac55
A
3684
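/*
 * Selector semantics sketch for ubc_cs_blob_get() below: -1 wildcards
 * either argument, and the two tests in its loop are independent (a blob
 * is returned when either the CPU type or the covered offset matches).
 * Values here are hypothetical.
 */
#if 0
	struct cs_blob *blob;

	/* any slice covering file offset 0x4000 */
	blob = ubc_cs_blob_get(vp, -1, (off_t)0x4000);

	/* the x86_64 slice, regardless of offset */
	blob = ubc_cs_blob_get(vp, CPU_TYPE_X86_64, (off_t)-1);
#endif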
3685static void
3686ubc_cs_free(
3687 struct ubc_info *uip)
91447636 3688{
2d21ac55
A
3689 struct cs_blob *blob, *next_blob;
3690
3691 for (blob = uip->cs_blobs;
3692 blob != NULL;
3693 blob = next_blob) {
3694 next_blob = blob->csb_next;
2d21ac55 3695 OSAddAtomic(-1, &cs_blob_count);
b0d623f7 3696 OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
d9a64523 3697 cs_blob_free(blob);
2d21ac55 3698 }
6d2010ae
A
3699#if CHECK_CS_VALIDATION_BITMAP
3700 ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
3701#endif
2d21ac55 3702 uip->cs_blobs = NULL;
91447636 3703}
2d21ac55 3704
fe8ab488
A
3705/* check cs blob generation on vnode
3706 * returns:
3707 * 0 : Success, the cs_blob attached is current
3708 * ENEEDAUTH : Generation count mismatch. Needs authentication again.
3709 */
3710int
3711ubc_cs_generation_check(
3712 struct vnode *vp)
3713{
3714 int retval = ENEEDAUTH;
3715
3716 vnode_lock_spin(vp);
3717
3718 if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
3719 retval = 0;
3720 }
3721
3722 vnode_unlock(vp);
3723 return retval;
3724}
3725
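/*
 * Pairing sketch: ubc_cs_generation_check() is the cheap test and
 * ubc_cs_blob_revalidate() (below) the expensive follow-up.  A caller
 * might drive them like this; error handling is elided and imgp/flags
 * are whatever the calling context already has.
 */
#if 0
	if (ubc_cs_generation_check(vp) == ENEEDAUTH) {
		struct cs_blob *b;
		for (b = ubc_get_cs_blobs(vp); b != NULL; b = b->csb_next) {
			if (ubc_cs_blob_revalidate(vp, b, imgp, flags) != 0)
				break;
		}
	}
#endif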
3726int
3727ubc_cs_blob_revalidate(
3728 struct vnode *vp,
c18c124e 3729 struct cs_blob *blob,
39037602
A
3730 struct image_params *imgp,
3731 int flags
fe8ab488
A
3732 )
3733{
3734 int error = 0;
fe8ab488 3735 const CS_CodeDirectory *cd = NULL;
39037602 3736 const CS_GenericBlob *entitlements = NULL;
813fb2f6 3737 size_t size;
fe8ab488
A
3738 assert(vp != NULL);
3739 assert(blob != NULL);
3740
813fb2f6
A
3741 size = blob->csb_mem_size;
3742 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
d9a64523 3743 size, &cd, &entitlements);
fe8ab488
A
3744 if (error) {
3745 if (cs_debug) {
3746 printf("CODESIGNING: csblob invalid: %d\n", error);
3747 }
3748 goto out;
3749 }
3750
39037602 3751 unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
5ba3f43e 3752 unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;
d9a64523
A
3753
3754 if (blob->csb_reconstituted) {
3755 /*
3756 * Code signatures that have been modified after validation
3757 * cannot be revalidated inline from their in-memory blob.
3758 *
3759 * That's okay, though, because the only path left that relies
3760 * on revalidation of existing in-memory blobs is the legacy
3761 * detached signature database path, which only exists on macOS,
3762 * which does not do reconstitution of any kind.
3763 */
3764 if (cs_debug) {
3765 printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
3766 }
3767
3768 /*
3769 * EAGAIN tells the caller that they may reread the code
3770 * signature and try attaching it again, which is the same
3771 * thing they would do if there was no cs_blob yet in the
3772 * first place.
3773 *
3774 * Conveniently, after ubc_cs_blob_add did a successful
3775 * validation, it will detect that a matching cs_blob (cdhash,
3776 * offset, arch etc.) already exists, and return success
3777 * without re-adding a cs_blob to the vnode.
3778 */
3779 return EAGAIN;
3780 }
3781
fe8ab488
A
3782 /* callout to mac_vnode_check_signature */
3783#if CONFIG_MACF
5ba3f43e 3784 error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
fe8ab488
A
3785 if (cs_debug && error) {
3786 printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
3787 }
39037602
A
3788#else
3789 (void)flags;
5ba3f43e 3790 (void)signer_type;
fe8ab488
A
3791#endif
3792
3793 /* update generation number if success */
3794 vnode_lock_spin(vp);
39037602 3795 blob->csb_flags = cs_flags;
5ba3f43e 3796 blob->csb_signer_type = signer_type;
fe8ab488
A
3797 if (UBCINFOEXISTS(vp)) {
3798 if (error == 0)
3799 vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
3800 else
3801 vp->v_ubcinfo->cs_add_gen = 0;
3802 }
3803
3804 vnode_unlock(vp);
3805
3806out:
3807 return error;
3808}
3809
3810void
3811cs_blob_reset_cache(void)
3812{
3813 /* Incrementing an odd number by 2 ensures the count never reaches '0'. */
3814 OSAddAtomic(+2, &cs_blob_generation_count);
3815 printf("Resetting cs_blob cache for all vnodes.\n");
3816}
3817
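/*
 * Wraparound check, assuming the generation count starts odd (per the
 * comment above): odd + 2 stays odd modulo 2^32, and 0 is even, so even
 * at wraparound (0xffffffff + 2 == 0x00000001) the counter never takes
 * the value 0 that ubc_cs_blob_revalidate() stores to force re-auth.
 */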
2d21ac55
A
3818struct cs_blob *
3819ubc_get_cs_blobs(
3820 struct vnode *vp)
91447636 3821{
2d21ac55
A
3822 struct ubc_info *uip;
3823 struct cs_blob *blobs;
3824
b0d623f7
A
3825 /*
3826 * No need to take the vnode lock here. The caller must be holding
3827 * a reference on the vnode (via a VM mapping or open file descriptor),
3828 * so the vnode will not go away. The ubc_info stays until the vnode
3829 * goes away. And we only modify "blobs" by adding to the head of the
3830 * list.
3831 * The ubc_info could go away entirely if the vnode gets reclaimed as
3832 * part of a forced unmount. In the case of a code-signature validation
3833 * during a page fault, the "paging_in_progress" reference on the VM
3834 * object guarantees that the vnode pager (and the ubc_info) won't go
3835 * away during the fault.
3836 * Other callers need to protect against vnode reclaim by holding the
3837 * vnode lock, for example.
3838 */
2d21ac55
A
3839
3840 if (! UBCINFOEXISTS(vp)) {
3841 blobs = NULL;
3842 goto out;
3843 }
3844
3845 uip = vp->v_ubcinfo;
3846 blobs = uip->cs_blobs;
3847
3848out:
2d21ac55 3849 return blobs;
91447636 3850}
2d21ac55 3851
15129b1c
A
3852void
3853ubc_get_cs_mtime(
3854 struct vnode *vp,
3855 struct timespec *cs_mtime)
3856{
3857 struct ubc_info *uip;
3858
3859 if (! UBCINFOEXISTS(vp)) {
3860 cs_mtime->tv_sec = 0;
3861 cs_mtime->tv_nsec = 0;
3862 return;
3863 }
3864
3865 uip = vp->v_ubcinfo;
3866 cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
3867 cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
3868}
3869
2d21ac55
A
3870unsigned long cs_validate_page_no_hash = 0;
3871unsigned long cs_validate_page_bad_hash = 0;
39037602
A
3872static boolean_t
3873cs_validate_hash(
3874 struct cs_blob *blobs,
316670eb 3875 memory_object_t pager,
2d21ac55
A
3876 memory_object_offset_t page_offset,
3877 const void *data,
39037602 3878 vm_size_t *bytes_processed,
c18c124e 3879 unsigned *tainted)
91447636 3880{
3e170ce0 3881 union cs_hash_union mdctx;
d190cdc3 3882 struct cs_hash const *hashtype = NULL;
3e170ce0 3883 unsigned char actual_hash[CS_HASH_MAX_SIZE];
490019cf 3884 unsigned char expected_hash[CS_HASH_MAX_SIZE];
2d21ac55 3885 boolean_t found_hash;
39037602 3886 struct cs_blob *blob;
2d21ac55 3887 const CS_CodeDirectory *cd;
2d21ac55
A
3888 const unsigned char *hash;
3889 boolean_t validated;
3890 off_t offset; /* page offset in the file */
3891 size_t size;
3892 off_t codeLimit = 0;
3e170ce0 3893 const char *lower_bound, *upper_bound;
2d21ac55 3894 vm_offset_t kaddr, blob_addr;
2d21ac55
A
3895
3896 /* retrieve the expected hash */
3897 found_hash = FALSE;
2d21ac55
A
3898
3899 for (blob = blobs;
3900 blob != NULL;
3901 blob = blob->csb_next) {
3902 offset = page_offset - blob->csb_base_offset;
3903 if (offset < blob->csb_start_offset ||
3904 offset >= blob->csb_end_offset) {
3905 /* our page is not covered by this blob */
3906 continue;
3907 }
3908
39037602 3909 /* blob data has been released */
2d21ac55
A
3910 kaddr = blob->csb_mem_kaddr;
3911 if (kaddr == 0) {
39037602 3912 continue;
2d21ac55 3913 }
39236c6e 3914
2d21ac55 3915 blob_addr = kaddr + blob->csb_mem_offset;
2d21ac55
A
3916 lower_bound = CAST_DOWN(char *, blob_addr);
3917 upper_bound = lower_bound + blob->csb_mem_size;
490019cf
A
3918
3919 cd = blob->csb_cd;
2d21ac55 3920 if (cd != NULL) {
3e170ce0 3921 /* all CDs that have been injected have already been validated */
b0d623f7 3922
3e170ce0
A
3923 hashtype = blob->csb_hashtype;
3924 if (hashtype == NULL)
3925 panic("unknown hash type ?");
3926 if (hashtype->cs_digest_size > sizeof(actual_hash))
3927 panic("hash size too large");
39037602
A
3928 if (offset & blob->csb_hash_pagemask)
3929 panic("offset not aligned to cshash boundary");
3e170ce0 3930
2d21ac55 3931 codeLimit = ntohl(cd->codeLimit);
39236c6e 3932
39037602 3933 hash = hashes(cd, (uint32_t)(offset>>blob->csb_hash_pageshift),
3e170ce0 3934 hashtype->cs_size,
2d21ac55 3935 lower_bound, upper_bound);
cf7d32b8 3936 if (hash != NULL) {
490019cf 3937 bcopy(hash, expected_hash, hashtype->cs_size);
cf7d32b8
A
3938 found_hash = TRUE;
3939 }
2d21ac55 3940
2d21ac55
A
3941 break;
3942 }
3943 }
3944
3945 if (found_hash == FALSE) {
3946 /*
3947 * We can't verify this page because there is no signature
3948 * for it (yet). It's possible that this part of the object
3949 * is not signed, or that signatures for that part have not
3950 * been loaded yet.
3951 * Report that the page has not been validated and let the
3952 * caller decide if it wants to accept it or not.
3953 */
3954 cs_validate_page_no_hash++;
3955 if (cs_debug > 1) {
3956 printf("CODE SIGNING: cs_validate_page: "
316670eb
A
3957 "mobj %p off 0x%llx: no hash to validate !?\n",
3958 pager, page_offset);
2d21ac55
A
3959 }
3960 validated = FALSE;
c18c124e 3961 *tainted = 0;
2d21ac55 3962 } else {
2d21ac55 3963
c18c124e
A
3964 *tainted = 0;
3965
39037602
A
3966 size = blob->csb_hash_pagesize;
3967 *bytes_processed = size;
3968
fe8ab488 3969 const uint32_t *asha1, *esha1;
b0d623f7 3970 if ((off_t)(offset + size) > codeLimit) {
2d21ac55
A
3971 /* partial page at end of segment */
3972 assert(offset < codeLimit);
39037602 3973 size = (size_t) (codeLimit & blob->csb_hash_pagemask);
c18c124e 3974 *tainted |= CS_VALIDATE_NX;
2d21ac55 3975 }
3e170ce0
A
3976
3977 hashtype->cs_init(&mdctx);
39037602
A
3978
3979 if (blob->csb_hash_firstlevel_pagesize) {
3980 const unsigned char *partial_data = (const unsigned char *)data;
3981 size_t i;
3982 for (i=0; i < size;) {
3983 union cs_hash_union partialctx;
3984 unsigned char partial_digest[CS_HASH_MAX_SIZE];
3985 size_t partial_size = MIN(size-i, blob->csb_hash_firstlevel_pagesize);
3986
3987 hashtype->cs_init(&partialctx);
3988 hashtype->cs_update(&partialctx, partial_data, partial_size);
3989 hashtype->cs_final(partial_digest, &partialctx);
3990
3991 /* Update cumulative multi-level hash */
3992 hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
3993 partial_data = partial_data + partial_size;
3994 i += partial_size;
3995 }
3996 } else {
3997 hashtype->cs_update(&mdctx, data, size);
3998 }
3e170ce0 3999 hashtype->cs_final(actual_hash, &mdctx);
2d21ac55 4000
fe8ab488
A
4001 asha1 = (const uint32_t *) actual_hash;
4002 esha1 = (const uint32_t *) expected_hash;
4003
490019cf 4004 if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
2d21ac55
A
4005 if (cs_debug) {
4006 printf("CODE SIGNING: cs_validate_page: "
fe8ab488
A
4007 "mobj %p off 0x%llx size 0x%lx: "
4008 "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
4009 "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
4010 pager, page_offset, size,
4011 asha1[0], asha1[1], asha1[2],
4012 asha1[3], asha1[4],
4013 esha1[0], esha1[1], esha1[2],
4014 esha1[3], esha1[4]);
2d21ac55
A
4015 }
4016 cs_validate_page_bad_hash++;
c18c124e 4017 *tainted |= CS_VALIDATE_TAINTED;
2d21ac55 4018 } else {
39236c6e 4019 if (cs_debug > 10) {
2d21ac55 4020 printf("CODE SIGNING: cs_validate_page: "
316670eb
A
4021 "mobj %p off 0x%llx size 0x%lx: "
4022 "hash OK\n",
4023 pager, page_offset, size);
2d21ac55 4024 }
2d21ac55
A
4025 }
4026 validated = TRUE;
4027 }
4028
4029 return validated;
91447636
A
4030}
4031
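/*
 * Multilevel-hash shape, in pseudo-form: with a first-level page size of
 * 4K and a csb_hash_pagesize of 16K (hypothetical values), the loop
 * above computes
 *
 *	H(page) = H( H(q0) || H(q1) || H(q2) || H(q3) )
 *
 * where H is blob->csb_hashtype's digest and q0..q3 are the 4K
 * sub-pages; the single-level case is just H(page) = H(data).
 */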
39037602
A
4032boolean_t
4033cs_validate_range(
4034 struct vnode *vp,
4035 memory_object_t pager,
4036 memory_object_offset_t page_offset,
4037 const void *data,
4038 vm_size_t dsize,
4039 unsigned *tainted)
4040{
4041 vm_size_t offset_in_range;
4042 boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */
4043
4044 struct cs_blob *blobs = ubc_get_cs_blobs(vp);
4045
4046 *tainted = 0;
4047
4048 for (offset_in_range = 0;
4049 offset_in_range < dsize;
4050 /* offset_in_range updated based on bytes processed */) {
4051 unsigned subrange_tainted = 0;
4052 boolean_t subrange_validated;
4053 vm_size_t bytes_processed = 0;
4054
4055 subrange_validated = cs_validate_hash(blobs,
4056 pager,
4057 page_offset + offset_in_range,
4058 (const void *)((const char *)data + offset_in_range),
4059 &bytes_processed,
4060 &subrange_tainted);
4061
4062 *tainted |= subrange_tainted;
4063
4064 if (bytes_processed == 0) {
4065 /* Cannot make forward progress, so return an error */
4066 all_subranges_validated = FALSE;
4067 break;
4068 } else if (subrange_validated == FALSE) {
4069 all_subranges_validated = FALSE;
4070 /* Keep going to detect other types of failures in subranges */
4071 }
4072
4073 offset_in_range += bytes_processed;
4074 }
4075
4076 return all_subranges_validated;
4077}
4078
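/*
 * Caller sketch for cs_validate_range() above: validating one freshly
 * paged-in page.  kaddr and file_offset are placeholders for the kernel
 * mapping and file offset the VM layer would supply.
 */
#if 0
	unsigned tainted = 0;
	if (!cs_validate_range(vp, pager, file_offset,
	        (const void *)kaddr, PAGE_SIZE, &tainted) ||
	    (tainted & CS_VALIDATE_TAINTED)) {
		/* page failed validation: do not map it executable */
	}
#endif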
2d21ac55
A
4079int
4080ubc_cs_getcdhash(
4081 vnode_t vp,
4082 off_t offset,
4083 unsigned char *cdhash)
4084{
b0d623f7
A
4085 struct cs_blob *blobs, *blob;
4086 off_t rel_offset;
4087 int ret;
4088
4089 vnode_lock(vp);
2d21ac55
A
4090
4091 blobs = ubc_get_cs_blobs(vp);
4092 for (blob = blobs;
4093 blob != NULL;
4094 blob = blob->csb_next) {
4095 /* compute offset relative to this blob */
4096 rel_offset = offset - blob->csb_base_offset;
4097 if (rel_offset >= blob->csb_start_offset &&
4098 rel_offset < blob->csb_end_offset) {
4099 /* this blob does cover our "offset" ! */
4100 break;
4101 }
4102 }
4103
4104 if (blob == NULL) {
4105 /* we didn't find a blob covering "offset" */
b0d623f7
A
4106 ret = EBADEXEC; /* XXX any better error ? */
4107 } else {
4108 /* copy that blob's cdhash (not necessarily SHA1) */
3e170ce0 4109 bcopy(blob->csb_cdhash, cdhash, sizeof (blob->csb_cdhash));
b0d623f7 4110 ret = 0;
2d21ac55
A
4111 }
4112
b0d623f7 4113 vnode_unlock(vp);
2d21ac55 4114
b0d623f7 4115 return ret;
2d21ac55 4116}
6d2010ae 4117
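/*
 * Usage sketch for ubc_cs_getcdhash() above: fetch the cdhash of
 * whichever blob covers a file offset.  CS_CDHASH_LEN matches the bcopy
 * in that routine; macho_slice_offset is a placeholder.
 */
#if 0
	unsigned char cdhash[CS_CDHASH_LEN];
	if (ubc_cs_getcdhash(vp, macho_slice_offset, cdhash) == 0) {
		/* cdhash now holds the code directory hash of that slice */
	}
#endif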
39037602
A
4118boolean_t
4119ubc_cs_is_range_codesigned(
4120 vnode_t vp,
4121 mach_vm_offset_t start,
4122 mach_vm_size_t size)
4123{
4124 struct cs_blob *csblob;
4125 mach_vm_offset_t blob_start;
4126 mach_vm_offset_t blob_end;
4127
4128 if (vp == NULL) {
4129 /* no file: no code signature */
4130 return FALSE;
4131 }
4132 if (size == 0) {
4133 /* no range: no code signature */
4134 return FALSE;
4135 }
4136 if (start + size < start) {
4137 /* overflow */
4138 return FALSE;
4139 }
4140
4141 csblob = ubc_cs_blob_get(vp, -1, start);
4142 if (csblob == NULL) {
4143 return FALSE;
4144 }
4145
4146 /*
4147 * We currently check if the range is covered by a single blob,
4148 * which should always be the case for the dyld shared cache.
4149 * If we ever want to make this routine handle other cases, we
4150 * would have to iterate if the blob does not cover the full range.
4151 */
4152 blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
4153 csblob->csb_start_offset);
4154 blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
4155 csblob->csb_end_offset);
4156 if (blob_start > start || blob_end < (start + size)) {
4157 /* range not fully covered by this code-signing blob */
4158 return FALSE;
4159 }
4160
4161 return TRUE;
4162}
4163
6d2010ae 4164#if CHECK_CS_VALIDATION_BITMAP
5ba3f43e 4165#define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
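/*
 * stob() is "size to bitmap bytes": one validation bit per page, rounded
 * up to a whole byte.  Worked example with 4K pages: stob(16384) ->
 * atop_64(round_page_64(16384)) == 4 bits -> (4 + 07) >> 3 == 1 byte.
 */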
6d2010ae
A
4166extern boolean_t root_fs_upgrade_try;
4167
4168/*
4169 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
4170 * Depends:
4171 * a) Is the target vnode on the root filesystem?
4172 * b) Has someone tried to mount the root filesystem read-write?
4173 * If answers are (a) yes AND (b) no, then we can use the bitmap.
4174 */
4175#define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
4176kern_return_t
4177ubc_cs_validation_bitmap_allocate(
4178 vnode_t vp)
4179{
4180 kern_return_t kr = KERN_SUCCESS;
4181 struct ubc_info *uip;
4182 char *target_bitmap;
4183 vm_object_size_t bitmap_size;
4184
4185 if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) {
4186 kr = KERN_INVALID_ARGUMENT;
4187 } else {
4188 uip = vp->v_ubcinfo;
4189
4190 if ( uip->cs_valid_bitmap == NULL ) {
4191 bitmap_size = stob(uip->ui_size);
4192 target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size );
4193 if (target_bitmap == 0) {
4194 kr = KERN_NO_SPACE;
4195 } else {
4196 kr = KERN_SUCCESS;
4197 }
4198 if( kr == KERN_SUCCESS ) {
4199 memset( target_bitmap, 0, (size_t)bitmap_size);
4200 uip->cs_valid_bitmap = (void*)target_bitmap;
4201 uip->cs_valid_bitmap_size = bitmap_size;
4202 }
4203 }
4204 }
4205 return kr;
4206}
4207
4208kern_return_t
4209ubc_cs_check_validation_bitmap (
4210 vnode_t vp,
4211 memory_object_offset_t offset,
4212 int optype)
4213{
4214 kern_return_t kr = KERN_SUCCESS;
4215
4216 if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) {
4217 kr = KERN_INVALID_ARGUMENT;
4218 } else {
4219 struct ubc_info *uip = vp->v_ubcinfo;
4220 char *target_bitmap = uip->cs_valid_bitmap;
4221
4222 if ( target_bitmap == NULL ) {
4223 kr = KERN_INVALID_ARGUMENT;
4224 } else {
4225 uint64_t bit, byte;
4226 bit = atop_64( offset );
4227 byte = bit >> 3;
4228
4229 if ( byte >= uip->cs_valid_bitmap_size ) {
4230 kr = KERN_INVALID_ARGUMENT;
4231 } else {
4232
4233 if (optype == CS_BITMAP_SET) {
4234 target_bitmap[byte] |= (1 << (bit & 07));
4235 kr = KERN_SUCCESS;
4236 } else if (optype == CS_BITMAP_CLEAR) {
4237 target_bitmap[byte] &= ~(1 << (bit & 07));
4238 kr = KERN_SUCCESS;
4239 } else if (optype == CS_BITMAP_CHECK) {
4240 if ( target_bitmap[byte] & (1 << (bit & 07))) {
4241 kr = KERN_SUCCESS;
4242 } else {
4243 kr = KERN_FAILURE;
4244 }
4245 }
4246 }
4247 }
4248 }
4249 return kr;
4250}
4251
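/*
 * Index arithmetic above, worked for 4K pages: offset 0x5000 -> bit ==
 * atop_64(0x5000) == 5, byte == 5 >> 3 == 0, mask == 1 << (5 & 07) ==
 * 0x20; so CS_BITMAP_SET sets bit 5 of byte 0.
 */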
4252void
4253ubc_cs_validation_bitmap_deallocate(
4254 vnode_t vp)
4255{
4256 struct ubc_info *uip;
4257 void *target_bitmap;
4258 vm_object_size_t bitmap_size;
4259
4260 if ( UBCINFOEXISTS(vp)) {
4261 uip = vp->v_ubcinfo;
4262
4263 if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) {
4264 bitmap_size = uip->cs_valid_bitmap_size;
4265 kfree( target_bitmap, (vm_size_t) bitmap_size );
4266 uip->cs_valid_bitmap = NULL;
4267 }
4268 }
4269}
4270#else
4271kern_return_t ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){
4272 return KERN_INVALID_ARGUMENT;
4273}
4274
4275kern_return_t ubc_cs_check_validation_bitmap(
4276 __unused struct vnode *vp,
4277 __unused memory_object_offset_t offset,
4278 __unused int optype){
4279
4280 return KERN_INVALID_ARGUMENT;
4281}
4282
4283void ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){
4284 return;
4285}
4286#endif /* CHECK_CS_VALIDATION_BITMAP */
d9a64523
A
4287
4288#if PMAP_CS
4289kern_return_t
4290cs_associate_blob_with_mapping(
4291 void *pmap,
4292 vm_map_offset_t start,
4293 vm_map_size_t size,
4294 vm_object_offset_t offset,
4295 void *blobs_p)
4296{
4297 off_t blob_start_offset, blob_end_offset;
4298 kern_return_t kr;
4299 struct cs_blob *blobs, *blob;
4300 vm_offset_t kaddr;
4301 struct pmap_cs_code_directory *cd_entry = NULL;
4302
4303 if (!pmap_cs) {
4304 return KERN_NOT_SUPPORTED;
4305 }
4306
4307 blobs = (struct cs_blob *)blobs_p;
4308
4309 for (blob = blobs;
4310 blob != NULL;
4311 blob = blob->csb_next) {
4312 blob_start_offset = (blob->csb_base_offset +
4313 blob->csb_start_offset);
4314 blob_end_offset = (blob->csb_base_offset +
4315 blob->csb_end_offset);
4316 if ((off_t) offset < blob_start_offset ||
4317 (off_t) offset >= blob_end_offset ||
4318 (off_t) (offset + size) <= blob_start_offset ||
4319 (off_t) (offset + size) > blob_end_offset) {
4320 continue;
4321 }
4322 kaddr = blob->csb_mem_kaddr;
4323 if (kaddr == 0) {
4324 /* blob data has been released */
4325 continue;
4326 }
4327 cd_entry = blob->csb_pmap_cs_entry;
4328 if (cd_entry == NULL) {
4329 continue;
4330 }
4331
4332 break;
4333 }
4334
4335 if (cd_entry != NULL) {
4336 kr = pmap_cs_associate(pmap,
4337 cd_entry,
4338 start,
4339 size);
4340 } else {
4341 kr = KERN_CODESIGN_ERROR;
4342 }
4343#if 00
4344 printf("FBDP %d[%s] pmap_cs_associate(%p,%p,0x%llx,0x%llx) -> kr=0x%x\n", proc_selfpid(), &(current_proc()->p_comm[0]), pmap, cd_entry, (uint64_t)start, (uint64_t)size, kr);
4345 kr = KERN_SUCCESS;
4346#endif
4347 return kr;
4348}
4349#endif /* PMAP_CS */