Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
fe8ab488 | 2 | * Copyright (c) 1999-2014 Apple Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
1c79356b | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
8f6c56a5 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
8f6c56a5 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b A |
27 | */ |
28 | /* | |
29 | * File: ubc_subr.c | |
30 | * Author: Umesh Vaishampayan [umeshv@apple.com] | |
31 | * 05-Aug-1999 umeshv Created. | |
32 | * | |
33 | * Functions related to Unified Buffer cache. | |
34 | * | |
0b4e3aa0 A |
35 | * Caller of UBC functions MUST have a valid reference on the vnode. |
36 | * | |
1c79356b A |
37 | */ |
38 | ||
1c79356b A |
39 | #include <sys/types.h> |
40 | #include <sys/param.h> | |
41 | #include <sys/systm.h> | |
42 | #include <sys/lock.h> | |
91447636 A |
43 | #include <sys/mman.h> |
44 | #include <sys/mount_internal.h> | |
45 | #include <sys/vnode_internal.h> | |
46 | #include <sys/ubc_internal.h> | |
1c79356b | 47 | #include <sys/ucred.h> |
91447636 A |
48 | #include <sys/proc_internal.h> |
49 | #include <sys/kauth.h> | |
1c79356b | 50 | #include <sys/buf.h> |
13fec989 | 51 | #include <sys/user.h> |
2d21ac55 | 52 | #include <sys/codesign.h> |
fe8ab488 A |
53 | #include <sys/codedir_internal.h> |
54 | #include <sys/fsevents.h> | |
c18c124e | 55 | #include <sys/fcntl.h> |
1c79356b A |
56 | |
57 | #include <mach/mach_types.h> | |
58 | #include <mach/memory_object_types.h> | |
91447636 A |
59 | #include <mach/memory_object_control.h> |
60 | #include <mach/vm_map.h> | |
b0d623f7 | 61 | #include <mach/mach_vm.h> |
91447636 | 62 | #include <mach/upl.h> |
1c79356b | 63 | |
91447636 | 64 | #include <kern/kern_types.h> |
2d21ac55 | 65 | #include <kern/kalloc.h> |
1c79356b | 66 | #include <kern/zalloc.h> |
13fec989 | 67 | #include <kern/thread.h> |
91447636 A |
68 | #include <vm/vm_kern.h> |
69 | #include <vm/vm_protos.h> /* last */ | |
1c79356b | 70 | |
2d21ac55 | 71 | #include <libkern/crypto/sha1.h> |
3e170ce0 | 72 | #include <libkern/crypto/sha2.h> |
39236c6e A |
73 | #include <libkern/libkern.h> |
74 | ||
593a1d5f | 75 | #include <security/mac_framework.h> |
fe8ab488 | 76 | #include <stdbool.h> |
593a1d5f | 77 | |
2d21ac55 A |
78 | /* XXX These should be in a BSD accessible Mach header, but aren't. */ |
79 | extern kern_return_t memory_object_pages_resident(memory_object_control_t, | |
80 | boolean_t *); | |
81 | extern kern_return_t memory_object_signed(memory_object_control_t control, | |
82 | boolean_t is_signed); | |
6d2010ae | 83 | extern boolean_t memory_object_is_slid(memory_object_control_t control); |
39236c6e | 84 | extern boolean_t memory_object_is_signed(memory_object_control_t); |
6d2010ae | 85 | |
2d21ac55 A |
86 | extern void Debugger(const char *message); |
87 | ||
88 | ||
89 | /* XXX no one uses this interface! */ | |
90 | kern_return_t ubc_page_op_with_control( | |
91 | memory_object_control_t control, | |
92 | off_t f_offset, | |
93 | int ops, | |
94 | ppnum_t *phys_entryp, | |
95 | int *flagsp); | |
96 | ||
97 | ||
1c79356b A |
98 | #if DIAGNOSTIC |
99 | #if defined(assert) | |
b0d623f7 | 100 | #undef assert |
1c79356b A |
101 | #endif |
102 | #define assert(cond) \ | |
2d21ac55 | 103 | ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond))) |
1c79356b A |
104 | #else |
105 | #include <kern/assert.h> | |
106 | #endif /* DIAGNOSTIC */ | |
107 | ||
2d21ac55 | 108 | static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize); |
0c530ab8 | 109 | static int ubc_umcallback(vnode_t, void *); |
0c530ab8 | 110 | static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *); |
2d21ac55 | 111 | static void ubc_cs_free(struct ubc_info *uip); |
b4c24cb9 | 112 | |
39037602 A |
113 | static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob); |
114 | static void ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob); | |
115 | ||
91447636 | 116 | struct zone *ubc_info_zone; |
fe8ab488 | 117 | static uint32_t cs_blob_generation_count = 1; |
2d21ac55 A |
118 | |
119 | /* | |
120 | * CODESIGNING | |
121 | * Routines to navigate code signing data structures in the kernel... | |
122 | */ | |
b0d623f7 A |
123 | |
124 | extern int cs_debug; | |
125 | ||
fe8ab488 | 126 | #define PAGE_SHIFT_4K (12) |
fe8ab488 | 127 | |
2d21ac55 A |
128 | static boolean_t |
129 | cs_valid_range( | |
130 | const void *start, | |
131 | const void *end, | |
132 | const void *lower_bound, | |
133 | const void *upper_bound) | |
134 | { | |
135 | if (upper_bound < lower_bound || | |
136 | end < start) { | |
137 | return FALSE; | |
138 | } | |
139 | ||
140 | if (start < lower_bound || | |
141 | end > upper_bound) { | |
142 | return FALSE; | |
143 | } | |
144 | ||
145 | return TRUE; | |
146 | } | |
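A minimal standalone sketch (not part of this file; the buffer and offsets are made up) of the range check cs_valid_range() performs on untrusted signature data — both tests are needed so that inverted ranges and ranges falling outside the blob are rejected:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Same shape as cs_valid_range(): reject inverted ranges and ranges that
 * fall outside the buffer holding the signature blob. */
static bool
valid_range(const void *start, const void *end,
    const void *lower_bound, const void *upper_bound)
{
	if (upper_bound < lower_bound || end < start)
		return false;
	if (start < lower_bound || end > upper_bound)
		return false;
	return true;
}

int
main(void)
{
	char backing[128];
	const char *lower = backing;
	const char *upper = backing + 64;	/* logical end of the blob */

	assert(valid_range(backing + 8, backing + 16, lower, upper));	/* in bounds */
	assert(!valid_range(backing + 16, backing + 8, lower, upper));	/* inverted range */
	assert(!valid_range(backing + 56, backing + 72, lower, upper));	/* runs past the end */
	return 0;
}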
147 | ||
3e170ce0 A |
148 | typedef void (*cs_md_init)(void *ctx); |
149 | typedef void (*cs_md_update)(void *ctx, const void *data, size_t size); | |
150 | typedef void (*cs_md_final)(void *hash, void *ctx); | |
151 | ||
152 | struct cs_hash { | |
490019cf A |
153 | uint8_t cs_type; /* type code as per code signing */ |
154 | size_t cs_size; /* size of effective hash (may be truncated) */ | |
155 | size_t cs_digest_size; /* size of native hash */ | |
3e170ce0 A |
156 | cs_md_init cs_init; |
157 | cs_md_update cs_update; | |
158 | cs_md_final cs_final; | |
159 | }; | |
160 | ||
161 | static struct cs_hash cs_hash_sha1 = { | |
162 | .cs_type = CS_HASHTYPE_SHA1, | |
3e170ce0 A |
163 | .cs_size = CS_SHA1_LEN, |
164 | .cs_digest_size = SHA_DIGEST_LENGTH, | |
165 | .cs_init = (cs_md_init)SHA1Init, | |
166 | .cs_update = (cs_md_update)SHA1Update, | |
167 | .cs_final = (cs_md_final)SHA1Final, | |
168 | }; | |
169 | #if CRYPTO_SHA2 | |
170 | static struct cs_hash cs_hash_sha256 = { | |
171 | .cs_type = CS_HASHTYPE_SHA256, | |
3e170ce0 A |
172 | .cs_size = SHA256_DIGEST_LENGTH, |
173 | .cs_digest_size = SHA256_DIGEST_LENGTH, | |
174 | .cs_init = (cs_md_init)SHA256_Init, | |
175 | .cs_update = (cs_md_update)SHA256_Update, | |
176 | .cs_final = (cs_md_final)SHA256_Final, | |
177 | }; | |
178 | static struct cs_hash cs_hash_sha256_truncate = { | |
179 | .cs_type = CS_HASHTYPE_SHA256_TRUNCATED, | |
3e170ce0 A |
180 | .cs_size = CS_SHA256_TRUNCATED_LEN, |
181 | .cs_digest_size = SHA256_DIGEST_LENGTH, | |
182 | .cs_init = (cs_md_init)SHA256_Init, | |
183 | .cs_update = (cs_md_update)SHA256_Update, | |
184 | .cs_final = (cs_md_final)SHA256_Final, | |
185 | }; | |
490019cf A |
186 | static struct cs_hash cs_hash_sha384 = { |
187 | .cs_type = CS_HASHTYPE_SHA384, | |
188 | .cs_size = SHA384_DIGEST_LENGTH, | |
189 | .cs_digest_size = SHA384_DIGEST_LENGTH, | |
190 | .cs_init = (cs_md_init)SHA384_Init, | |
191 | .cs_update = (cs_md_update)SHA384_Update, | |
192 | .cs_final = (cs_md_final)SHA384_Final, | |
193 | }; | |
3e170ce0 | 194 | #endif |
39037602 | 195 | |
3e170ce0 A |
196 | static struct cs_hash * |
197 | cs_find_md(uint8_t type) | |
198 | { | |
199 | if (type == CS_HASHTYPE_SHA1) { | |
200 | return &cs_hash_sha1; | |
201 | #if CRYPTO_SHA2 | |
202 | } else if (type == CS_HASHTYPE_SHA256) { | |
203 | return &cs_hash_sha256; | |
204 | } else if (type == CS_HASHTYPE_SHA256_TRUNCATED) { | |
205 | return &cs_hash_sha256_truncate; | |
490019cf A |
206 | } else if (type == CS_HASHTYPE_SHA384) { |
207 | return &cs_hash_sha384; | |
3e170ce0 A |
208 | #endif |
209 | } | |
210 | return NULL; | |
211 | } | |
212 | ||
213 | union cs_hash_union { | |
214 | SHA1_CTX sha1ctxt; | |
215 | SHA256_CTX sha256ctx; | |
490019cf | 216 | SHA384_CTX sha384ctx; |
3e170ce0 A |
217 | }; |
218 | ||
219 | ||
2d21ac55 | 220 | /* |
490019cf A |
221 | * Choose among different hash algorithms. |
222 | * Higher is better, 0 => don't use at all. | |
2d21ac55 | 223 | */ |
490019cf A |
224 | static uint32_t hashPriorities[] = { |
225 | CS_HASHTYPE_SHA1, | |
226 | CS_HASHTYPE_SHA256_TRUNCATED, | |
227 | CS_HASHTYPE_SHA256, | |
228 | CS_HASHTYPE_SHA384, | |
229 | }; | |
b0d623f7 | 230 | |
490019cf A |
231 | static unsigned int |
232 | hash_rank(const CS_CodeDirectory *cd) | |
233 | { | |
234 | uint32_t type = cd->hashType; | |
235 | unsigned int n; | |
2d21ac55 | 236 | |
490019cf A |
237 | for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) |
238 | if (hashPriorities[n] == type) | |
239 | return n + 1; | |
240 | return 0; /* not supported */ | |
2d21ac55 A |
241 | } |
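A small standalone sketch of the ranking rule above (the T_* constants here are stand-ins, not the real CS_HASHTYPE_* values): a hash type's rank is its position in the priority array plus one, so SHA-384 outranks SHA-1 and an unknown type is never chosen.

#include <assert.h>
#include <stdint.h>

/* Stand-ins for the CS_HASHTYPE_* codes; the real numeric values live in
 * the code-signing headers and are not reproduced here. */
enum { T_SHA1 = 1, T_SHA256_TRUNC = 2, T_SHA256 = 3, T_SHA384 = 4 };

static const uint32_t priorities[] = { T_SHA1, T_SHA256_TRUNC, T_SHA256, T_SHA384 };

/* Mirrors hash_rank(): position in the priority array plus one,
 * or 0 for a hash type that is not supported. */
static unsigned int
rank(uint32_t type)
{
	unsigned int n;

	for (n = 0; n < sizeof(priorities) / sizeof(priorities[0]); ++n)
		if (priorities[n] == type)
			return n + 1;
	return 0;
}

int
main(void)
{
	assert(rank(T_SHA384) > rank(T_SHA1));	/* a SHA-384 CodeDirectory outranks SHA-1 */
	assert(rank(99) == 0);			/* unknown type is never chosen */
	return 0;
}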
242 | ||
243 | ||
244 | /* | |
245 | * Locating a page hash | |
246 | */ | |
247 | static const unsigned char * | |
248 | hashes( | |
249 | const CS_CodeDirectory *cd, | |
3e170ce0 A |
250 | uint32_t page, |
251 | size_t hash_len, | |
252 | const char *lower_bound, | |
253 | const char *upper_bound) | |
2d21ac55 A |
254 | { |
255 | const unsigned char *base, *top, *hash; | |
b0d623f7 | 256 | uint32_t nCodeSlots = ntohl(cd->nCodeSlots); |
2d21ac55 A |
257 | |
258 | assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound)); | |
259 | ||
39236c6e | 260 | if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { |
b0d623f7 | 261 | /* Get first scatter struct */ |
39236c6e | 262 | const SC_Scatter *scatter = (const SC_Scatter*) |
b0d623f7 A |
263 | ((const char*)cd + ntohl(cd->scatterOffset)); |
264 | uint32_t hashindex=0, scount, sbase=0; | |
265 | /* iterate all scatter structs */ | |
266 | do { | |
267 | if((const char*)scatter > (const char*)cd + ntohl(cd->length)) { | |
268 | if(cs_debug) { | |
269 | printf("CODE SIGNING: Scatter extends past Code Directory\n"); | |
270 | } | |
271 | return NULL; | |
272 | } | |
273 | ||
274 | scount = ntohl(scatter->count); | |
275 | uint32_t new_base = ntohl(scatter->base); | |
276 | ||
277 | /* last scatter? */ | |
278 | if (scount == 0) { | |
279 | return NULL; | |
280 | } | |
281 | ||
282 | if((hashindex > 0) && (new_base <= sbase)) { | |
283 | if(cs_debug) { | |
284 | printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n", | |
285 | sbase, new_base); | |
286 | } | |
287 | return NULL; /* unordered scatter array */ | |
288 | } | |
289 | sbase = new_base; | |
290 | ||
291 | /* this scatter beyond page we're looking for? */ | |
292 | if (sbase > page) { | |
293 | return NULL; | |
294 | } | |
295 | ||
296 | if (sbase+scount >= page) { | |
297 | /* Found the scatter struct that is | |
298 | * referencing our page */ | |
299 | ||
300 | /* base = address of first hash covered by scatter */ | |
301 | base = (const unsigned char *)cd + ntohl(cd->hashOffset) + | |
3e170ce0 | 302 | hashindex * hash_len; |
b0d623f7 | 303 | /* top = address of first hash after this scatter */ |
3e170ce0 | 304 | top = base + scount * hash_len; |
b0d623f7 A |
305 | if (!cs_valid_range(base, top, lower_bound, |
306 | upper_bound) || | |
307 | hashindex > nCodeSlots) { | |
308 | return NULL; | |
309 | } | |
310 | ||
311 | break; | |
312 | } | |
313 | ||
314 | /* this scatter struct is before the page we're looking | |
315 | * for. Iterate. */ | |
316 | hashindex+=scount; | |
317 | scatter++; | |
318 | } while(1); | |
319 | ||
3e170ce0 | 320 | hash = base + (page - sbase) * hash_len; |
b0d623f7 A |
321 | } else { |
322 | base = (const unsigned char *)cd + ntohl(cd->hashOffset); | |
3e170ce0 | 323 | top = base + nCodeSlots * hash_len; |
b0d623f7 A |
324 | if (!cs_valid_range(base, top, lower_bound, upper_bound) || |
325 | page > nCodeSlots) { | |
326 | return NULL; | |
327 | } | |
328 | assert(page < nCodeSlots); | |
2d21ac55 | 329 | |
3e170ce0 | 330 | hash = base + page * hash_len; |
b0d623f7 A |
331 | } |
332 | ||
3e170ce0 | 333 | if (!cs_valid_range(hash, hash + hash_len, |
2d21ac55 A |
334 | lower_bound, upper_bound)) { |
335 | hash = NULL; | |
336 | } | |
337 | ||
338 | return hash; | |
339 | } | |
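In the common non-scatter case above, locating a page's expected hash is plain array indexing from hashOffset. A standalone sketch of that addressing, with a made-up hash length and slot count:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
	/* Pretend the hash area holds 4 slots of 20-byte (SHA-1 sized) hashes;
	 * the contents are arbitrary, for illustration only. */
	enum { HASH_LEN = 20, NSLOTS = 4 };
	uint8_t slots[NSLOTS][HASH_LEN];
	const uint8_t *base = &slots[0][0];	/* i.e. cd + ntohl(cd->hashOffset) */
	uint32_t page = 2;

	memset(slots, 0, sizeof(slots));
	memset(slots[2], 0xab, HASH_LEN);	/* the hash that covers page 2 */

	/* Flat (non-scatter) case in hashes(): hash = base + page * hash_len */
	const uint8_t *hash = base + page * HASH_LEN;

	assert(hash[0] == 0xab && hash[HASH_LEN - 1] == 0xab);
	return 0;
}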
39236c6e A |
340 | |
341 | /* | |
342 | * cs_validate_codedirectory | |
343 | * | |
344 | * Validate the pointers inside the code directory to make sure that |
345 | * all offsets and lengths are constrained within the buffer. | |
346 | * | |
347 | * Parameters: cd Pointer to code directory buffer | |
348 | * length Length of buffer | |
349 | * | |
350 | * Returns: 0 Success | |
351 | * EBADEXEC Invalid code signature | |
352 | */ | |
353 | ||
354 | static int | |
355 | cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length) | |
356 | { | |
3e170ce0 | 357 | struct cs_hash *hashtype; |
39236c6e A |
358 | |
359 | if (length < sizeof(*cd)) | |
360 | return EBADEXEC; | |
361 | if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) | |
362 | return EBADEXEC; | |
39037602 | 363 | if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT) |
39236c6e | 364 | return EBADEXEC; |
3e170ce0 A |
365 | hashtype = cs_find_md(cd->hashType); |
366 | if (hashtype == NULL) | |
39236c6e A |
367 | return EBADEXEC; |
368 | ||
490019cf | 369 | if (cd->hashSize != hashtype->cs_size) |
3e170ce0 A |
370 | return EBADEXEC; |
371 | ||
39236c6e A |
372 | if (length < ntohl(cd->hashOffset)) |
373 | return EBADEXEC; | |
374 | ||
375 | /* check that nSpecialSlots fits in the buffer in front of hashOffset */ | |
3e170ce0 | 376 | if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) |
39236c6e A |
377 | return EBADEXEC; |
378 | ||
379 | /* check that codeslots fits in the buffer */ | |
3e170ce0 | 380 | if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) |
39236c6e A |
381 | return EBADEXEC; |
382 | ||
383 | if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) { | |
384 | ||
385 | if (length < ntohl(cd->scatterOffset)) | |
386 | return EBADEXEC; | |
387 | ||
3e170ce0 A |
388 | const SC_Scatter *scatter = (const SC_Scatter *) |
389 | (((const uint8_t *)cd) + ntohl(cd->scatterOffset)); | |
39236c6e A |
390 | uint32_t nPages = 0; |
391 | ||
392 | /* | |
393 | * Check each scatter buffer; since we don't know the |
394 | * length of the scatter buffer array, we have to | |
395 | * check each entry. | |
396 | */ | |
397 | while(1) { | |
398 | /* check that the end of each scatter buffer is within the length */ |
399 | if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) | |
400 | return EBADEXEC; | |
401 | uint32_t scount = ntohl(scatter->count); | |
402 | if (scount == 0) | |
403 | break; | |
404 | if (nPages + scount < nPages) | |
405 | return EBADEXEC; | |
406 | nPages += scount; | |
407 | scatter++; | |
408 | ||
409 | /* XXX check that bases don't overlap */ |
410 | /* XXX check that targetOffset doesn't overlap */ | |
411 | } | |
412 | #if 0 /* rdar://12579439 */ | |
413 | if (nPages != ntohl(cd->nCodeSlots)) | |
414 | return EBADEXEC; | |
415 | #endif | |
416 | } | |
417 | ||
418 | if (length < ntohl(cd->identOffset)) | |
419 | return EBADEXEC; | |
420 | ||
421 | /* identifier is NUL terminated string */ | |
422 | if (cd->identOffset) { | |
3e170ce0 | 423 | const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset); |
39236c6e A |
424 | if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) |
425 | return EBADEXEC; | |
426 | } | |
427 | ||
fe8ab488 A |
428 | /* team identifier is NUL terminated string */ |
429 | if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) { | |
430 | if (length < ntohl(cd->teamOffset)) | |
431 | return EBADEXEC; | |
432 | ||
3e170ce0 | 433 | const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset); |
fe8ab488 A |
434 | if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) |
435 | return EBADEXEC; | |
436 | } | |
437 | ||
39236c6e A |
438 | return 0; |
439 | } | |
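The two slot-count checks above are phrased as divisions rather than the equivalent multiplications: a division-based bound cannot overflow, while multiplying an attacker-controlled count by the hash size can wrap and let an absurd value through. A standalone sketch with made-up numbers:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* Untrusted values as they might appear in a hostile CodeDirectory. */
	uint32_t nSpecialSlots = 0x20000001u;	/* absurdly large slot count */
	uint32_t hashOffset    = 0x1000u;	/* small, plausible offset */
	uint32_t cs_size       = 32u;		/* SHA-256 hash size */

	/* Naive form: the 32-bit product wraps to 0x20, so the bogus count
	 * would appear to fit in front of hashOffset. */
	uint32_t product = nSpecialSlots * cs_size;
	assert(product == 0x20u && product <= hashOffset);

	/* Division form, as used above: no overflow, bogus count detected. */
	assert(hashOffset / cs_size < nSpecialSlots);	/* i.e. reject with EBADEXEC */
	return 0;
}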
440 | ||
441 | /* | |
442 | * | |
443 | */ | |
444 | ||
445 | static int | |
446 | cs_validate_blob(const CS_GenericBlob *blob, size_t length) | |
447 | { | |
448 | if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) | |
449 | return EBADEXEC; | |
450 | return 0; | |
451 | } | |
452 | ||
453 | /* | |
454 | * cs_validate_csblob | |
455 | * | |
456 | * Validate the superblob/embedded code directory to make sure that |
457 | * all internal pointers are valid. | |
458 | * | |
459 | * Will validate both a superblob csblob and a "raw" code directory. | |
460 | * | |
461 | * | |
462 | * Parameters: buffer Pointer to code signature | |
463 | * length Length of buffer | |
464 | * rcd returns pointer to code directory | |
465 | * | |
466 | * Returns: 0 Success | |
467 | * EBADEXEC Invalid code signature | |
468 | */ | |
469 | ||
470 | static int | |
471 | cs_validate_csblob(const uint8_t *addr, size_t length, | |
39037602 A |
472 | const CS_CodeDirectory **rcd, |
473 | const CS_GenericBlob **rentitlements) | |
39236c6e | 474 | { |
3e170ce0 | 475 | const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr; |
39236c6e A |
476 | int error; |
477 | ||
478 | *rcd = NULL; | |
39037602 | 479 | *rentitlements = NULL; |
39236c6e A |
480 | |
481 | error = cs_validate_blob(blob, length); | |
482 | if (error) | |
483 | return error; | |
484 | ||
485 | length = ntohl(blob->length); | |
486 | ||
487 | if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) { | |
490019cf A |
488 | const CS_SuperBlob *sb; |
489 | uint32_t n, count; | |
490 | const CS_CodeDirectory *best_cd = NULL; | |
491 | unsigned int best_rank = 0; | |
39236c6e A |
492 | |
493 | if (length < sizeof(CS_SuperBlob)) | |
494 | return EBADEXEC; | |
495 | ||
490019cf A |
496 | sb = (const CS_SuperBlob *)blob; |
497 | count = ntohl(sb->count); | |
498 | ||
39236c6e A |
499 | /* check that the array of BlobIndex fits in the rest of the data */ |
500 | if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) | |
501 | return EBADEXEC; | |
502 | ||
503 | /* now check each BlobIndex */ | |
504 | for (n = 0; n < count; n++) { | |
505 | const CS_BlobIndex *blobIndex = &sb->index[n]; | |
490019cf A |
506 | uint32_t type = ntohl(blobIndex->type); |
507 | uint32_t offset = ntohl(blobIndex->offset); | |
508 | if (length < offset) | |
39236c6e A |
509 | return EBADEXEC; |
510 | ||
511 | const CS_GenericBlob *subBlob = | |
490019cf | 512 | (const CS_GenericBlob *)(const void *)(addr + offset); |
39236c6e | 513 | |
490019cf | 514 | size_t subLength = length - offset; |
39236c6e A |
515 | |
516 | if ((error = cs_validate_blob(subBlob, subLength)) != 0) | |
517 | return error; | |
518 | subLength = ntohl(subBlob->length); | |
519 | ||
520 | /* extra validation for CDs, that is also returned */ | |
490019cf A |
521 | if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) { |
522 | const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob; | |
523 | if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) | |
39236c6e | 524 | return error; |
490019cf A |
525 | unsigned int rank = hash_rank(candidate); |
526 | if (cs_debug > 3) | |
527 | printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n); | |
528 | if (best_cd == NULL || rank > best_rank) { | |
529 | best_cd = candidate; | |
530 | best_rank = rank; | |
39037602 A |
531 | |
532 | if (cs_debug > 2) | |
533 | printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank); | |
534 | *rcd = best_cd; | |
490019cf A |
535 | } else if (best_cd != NULL && rank == best_rank) { |
536 | /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */ | |
39037602 A |
537 | printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType); |
538 | return EBADEXEC; | |
539 | } | |
540 | } else if (type == CSSLOT_ENTITLEMENTS) { | |
541 | if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) { | |
542 | return EBADEXEC; | |
543 | } | |
544 | if (*rentitlements != NULL) { | |
545 | printf("multiple entitlements blobs\n"); | |
490019cf A |
546 | return EBADEXEC; |
547 | } | |
39037602 | 548 | *rentitlements = subBlob; |
39236c6e A |
549 | } |
550 | } | |
551 | ||
552 | } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) { | |
553 | ||
3e170ce0 | 554 | if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) |
39236c6e A |
555 | return error; |
556 | *rcd = (const CS_CodeDirectory *)blob; | |
557 | } else { | |
558 | return EBADEXEC; | |
559 | } | |
560 | ||
561 | if (*rcd == NULL) | |
562 | return EBADEXEC; | |
563 | ||
564 | return 0; | |
565 | } | |
566 | ||
567 | /* | |
568 | * cs_find_blob_bytes | |
569 | * | |
570 | * Find a blob from the superblob/code directory. The blob must have |
571 | * been validated by cs_validate_csblob() before calling |
3e170ce0 | 572 | * this. Use csblob_find_blob() instead. |
39236c6e A |
573 | * |
574 | * Will also find a "raw" code directory if one is stored directly, in |
575 | * addition to searching the superblob. |
576 | * | |
577 | * Parameters: buffer Pointer to code signature | |
578 | * length Length of buffer | |
579 | * type type of blob to find | |
580 | * magic the magic number for that blob | |
581 | * | |
582 | * Returns: pointer Success | |
583 | * NULL Buffer not found | |
584 | */ | |
585 | ||
3e170ce0 A |
586 | const CS_GenericBlob * |
587 | csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic) | |
39236c6e | 588 | { |
3e170ce0 | 589 | const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr; |
39236c6e A |
590 | |
591 | if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) { | |
592 | const CS_SuperBlob *sb = (const CS_SuperBlob *)blob; | |
593 | size_t n, count = ntohl(sb->count); | |
594 | ||
595 | for (n = 0; n < count; n++) { | |
596 | if (ntohl(sb->index[n].type) != type) | |
597 | continue; | |
598 | uint32_t offset = ntohl(sb->index[n].offset); | |
599 | if (length - sizeof(const CS_GenericBlob) < offset) | |
600 | return NULL; | |
3e170ce0 | 601 | blob = (const CS_GenericBlob *)(const void *)(addr + offset); |
39236c6e A |
602 | if (ntohl(blob->magic) != magic) |
603 | continue; | |
604 | return blob; | |
605 | } | |
606 | } else if (type == CSSLOT_CODEDIRECTORY | |
607 | && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY | |
608 | && magic == CSMAGIC_CODEDIRECTORY) | |
609 | return blob; | |
610 | return NULL; | |
611 | } | |
612 | ||
613 | ||
fe8ab488 | 614 | const CS_GenericBlob * |
3e170ce0 | 615 | csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic) |
39236c6e A |
616 | { |
617 | if ((csblob->csb_flags & CS_VALID) == 0) | |
618 | return NULL; | |
3e170ce0 | 619 | return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic); |
39236c6e A |
620 | } |
621 | ||
622 | static const uint8_t * | |
3e170ce0 | 623 | find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot) |
39236c6e A |
624 | { |
625 | /* there is no zero special slot since that is the first code slot */ | |
626 | if (ntohl(cd->nSpecialSlots) < slot || slot == 0) | |
627 | return NULL; | |
628 | ||
3e170ce0 | 629 | return ((const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot)); |
39236c6e A |
630 | } |
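Special-slot hashes are stored in reverse order immediately in front of hashOffset, which is why find_special_slot() subtracts slotsize * slot. A standalone sketch of that layout with made-up sizes (the real slot numbers come from the CSSLOT_* constants):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
	enum { SLOTSIZE = 20, NSPECIAL = 5, NCODE = 3 };

	/* One flat hash area: NSPECIAL special-slot hashes stored in reverse
	 * order, immediately followed by the code-slot hashes. "hash_offset"
	 * marks where the code slots begin (cd + ntohl(cd->hashOffset)). */
	uint8_t area[(NSPECIAL + NCODE) * SLOTSIZE];
	const uint8_t *hash_offset = area + NSPECIAL * SLOTSIZE;
	unsigned int slot = 5;			/* some CSSLOT_* special slot */

	memset(area, 0, sizeof(area));
	memset(area + (NSPECIAL - slot) * SLOTSIZE, 0xee, SLOTSIZE);

	/* find_special_slot(): hashOffset - slotsize * slot */
	const uint8_t *found = hash_offset - SLOTSIZE * slot;
	assert(found[0] == 0xee);
	return 0;
}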
631 | ||
3e170ce0 | 632 | static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 }; |
39236c6e | 633 | |
6d2010ae | 634 | int |
3e170ce0 | 635 | csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length) |
6d2010ae | 636 | { |
3e170ce0 | 637 | uint8_t computed_hash[CS_HASH_MAX_SIZE]; |
39236c6e A |
638 | const CS_GenericBlob *entitlements; |
639 | const CS_CodeDirectory *code_dir; | |
39236c6e | 640 | const uint8_t *embedded_hash; |
3e170ce0 | 641 | union cs_hash_union context; |
39236c6e A |
642 | |
643 | *out_start = NULL; | |
644 | *out_length = 0; | |
645 | ||
3e170ce0 A |
646 | if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) |
647 | return EBADEXEC; | |
39236c6e | 648 | |
490019cf | 649 | code_dir = csblob->csb_cd; |
39236c6e | 650 | |
39037602 A |
651 | if ((csblob->csb_flags & CS_VALID) == 0) { |
652 | entitlements = NULL; | |
653 | } else { | |
654 | entitlements = csblob->csb_entitlements_blob; | |
655 | } | |
3e170ce0 | 656 | embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS); |
39236c6e A |
657 | |
658 | if (embedded_hash == NULL) { | |
659 | if (entitlements) | |
660 | return EBADEXEC; | |
661 | return 0; | |
490019cf A |
662 | } else if (entitlements == NULL) { |
663 | if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) { | |
664 | return EBADEXEC; | |
665 | } else { | |
666 | return 0; | |
667 | } | |
6d2010ae | 668 | } |
39236c6e | 669 | |
3e170ce0 A |
670 | csblob->csb_hashtype->cs_init(&context); |
671 | csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length)); | |
672 | csblob->csb_hashtype->cs_final(computed_hash, &context); | |
673 | ||
674 | if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) | |
39236c6e A |
675 | return EBADEXEC; |
676 | ||
3e170ce0 | 677 | *out_start = __DECONST(void *, entitlements); |
39236c6e A |
678 | *out_length = ntohl(entitlements->length); |
679 | ||
680 | return 0; | |
681 | } | |
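The entitlements check above amounts to: digest the entitlements blob with the code directory's hash type, then memcmp the result against the hash stored in the entitlements special slot. A userland sketch of the same idea, assuming OpenSSL is available for the digest (the kernel drives its cs_hashtype function pointers instead):

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <openssl/sha.h>	/* assumption: OpenSSL provides the SHA-256 one-shot */

int
main(void)
{
	/* Stand-in for the raw bytes of the entitlements GenericBlob. */
	const uint8_t blob[] = "<plist>made-up entitlements</plist>";
	uint8_t stored_slot[SHA256_DIGEST_LENGTH];	/* what the signer stored */
	uint8_t computed[SHA256_DIGEST_LENGTH];		/* what validation recomputes */
	uint8_t tampered[sizeof(blob)];

	SHA256(blob, sizeof(blob), stored_slot);

	/* Untouched blob: recomputed digest matches the special-slot hash. */
	SHA256(blob, sizeof(blob), computed);
	assert(memcmp(stored_slot, computed, sizeof(computed)) == 0);

	/* Any tampering changes the digest; the kernel path would return EBADEXEC. */
	memcpy(tampered, blob, sizeof(blob));
	tampered[10] ^= 1;
	SHA256(tampered, sizeof(tampered), computed);
	assert(memcmp(stored_slot, computed, sizeof(computed)) != 0);
	return 0;
}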
682 | ||
6d2010ae | 683 | /* |
3e170ce0 A |
684 | * CODESIGNING |
685 | * End of routines to navigate code signing data structures in the kernel. | |
6d2010ae A |
686 | */ |
687 | ||
688 | ||
2d21ac55 | 689 | |
1c79356b | 690 | /* |
2d21ac55 A |
691 | * ubc_init |
692 | * | |
693 | * Initialization of the zone for Unified Buffer Cache. | |
694 | * | |
695 | * Parameters: (void) | |
696 | * | |
697 | * Returns: (void) | |
698 | * | |
699 | * Implicit returns: | |
700 | * ubc_info_zone(global) initialized for subsequent allocations | |
1c79356b | 701 | */ |
0b4e3aa0 | 702 | __private_extern__ void |
2d21ac55 | 703 | ubc_init(void) |
1c79356b A |
704 | { |
705 | int i; | |
706 | ||
707 | i = (vm_size_t) sizeof (struct ubc_info); | |
2d21ac55 | 708 | |
1c79356b | 709 | ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone"); |
0b4c1975 A |
710 | |
711 | zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE); | |
1c79356b A |
712 | } |
713 | ||
2d21ac55 | 714 | |
1c79356b | 715 | /* |
2d21ac55 A |
716 | * ubc_info_init |
717 | * | |
718 | * Allocate and attach an empty ubc_info structure to a vnode | |
719 | * | |
720 | * Parameters: vp Pointer to the vnode | |
721 | * | |
722 | * Returns: 0 Success | |
723 | * vnode_size:ENOMEM Not enough space | |
724 | * vnode_size:??? Other error from vnode_getattr | |
725 | * | |
1c79356b A |
726 | */ |
727 | int | |
728 | ubc_info_init(struct vnode *vp) | |
91447636 A |
729 | { |
730 | return(ubc_info_init_internal(vp, 0, 0)); | |
731 | } | |
2d21ac55 A |
732 | |
733 | ||
734 | /* | |
735 | * ubc_info_init_withsize | |
736 | * | |
737 | * Allocate and attach a sized ubc_info structure to a vnode | |
738 | * | |
739 | * Parameters: vp Pointer to the vnode | |
740 | * filesize The size of the file | |
741 | * | |
742 | * Returns: 0 Success | |
743 | * vnode_size:ENOMEM Not enough space | |
744 | * vnode_size:??? Other error from vnode_getattr | |
745 | */ | |
91447636 A |
746 | int |
747 | ubc_info_init_withsize(struct vnode *vp, off_t filesize) | |
748 | { | |
749 | return(ubc_info_init_internal(vp, 1, filesize)); | |
750 | } | |
751 | ||
2d21ac55 A |
752 | |
753 | /* | |
754 | * ubc_info_init_internal | |
755 | * | |
756 | * Allocate and attach a ubc_info structure to a vnode | |
757 | * | |
758 | * Parameters: vp Pointer to the vnode | |
759 | * withfsize{0,1} Zero if the size should be obtained | |
760 | * from the vnode; otherwise, use filesize | |
761 | * filesize The size of the file, if withfsize == 1 | |
762 | * | |
763 | * Returns: 0 Success | |
764 | * vnode_size:ENOMEM Not enough space | |
765 | * vnode_size:??? Other error from vnode_getattr | |
766 | * | |
767 | * Notes: We call a blocking zalloc(), and the zone was created as an | |
768 | * expandable and collectable zone, so if no memory is available, | |
769 | * it is possible for zalloc() to block indefinitely. zalloc() | |
770 | * may also panic if the zone of zones is exhausted, since it's | |
771 | * NOT expandable. | |
772 | * | |
773 | * We unconditionally call vnode_pager_setup(), even if this is | |
774 | * a reuse of a ubc_info; in that case, we should probably assert | |
775 | * that it does not already have a pager association, but do not. | |
776 | * | |
777 | * Since memory_object_create_named() can only fail from receiving | |
778 | * an invalid pager argument, the explicit check and panic is | |
779 | * merely precautionary. | |
780 | */ | |
781 | static int | |
782 | ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize) | |
1c79356b | 783 | { |
39037602 | 784 | struct ubc_info *uip; |
1c79356b | 785 | void * pager; |
1c79356b A |
786 | int error = 0; |
787 | kern_return_t kret; | |
0b4e3aa0 | 788 | memory_object_control_t control; |
1c79356b | 789 | |
91447636 | 790 | uip = vp->v_ubcinfo; |
1c79356b | 791 | |
2d21ac55 A |
792 | /* |
793 | * If there is not already a ubc_info attached to the vnode, we | |
794 | * attach one; otherwise, we will reuse the one that's there. | |
795 | */ | |
91447636 | 796 | if (uip == UBC_INFO_NULL) { |
1c79356b | 797 | |
1c79356b | 798 | uip = (struct ubc_info *) zalloc(ubc_info_zone); |
91447636 A |
799 | bzero((char *)uip, sizeof(struct ubc_info)); |
800 | ||
1c79356b | 801 | uip->ui_vnode = vp; |
91447636 | 802 | uip->ui_flags = UI_INITED; |
1c79356b A |
803 | uip->ui_ucred = NOCRED; |
804 | } | |
1c79356b A |
805 | assert(uip->ui_flags != UI_NONE); |
806 | assert(uip->ui_vnode == vp); | |
807 | ||
1c79356b A |
808 | /* now set this ubc_info in the vnode */ |
809 | vp->v_ubcinfo = uip; | |
91447636 | 810 | |
2d21ac55 A |
811 | /* |
812 | * Allocate a pager object for this vnode | |
813 | * | |
814 | * XXX The value of the pager parameter is currently ignored. | |
815 | * XXX Presumably, this API changed to avoid the race between | |
816 | * XXX setting the pager and the UI_HASPAGER flag. | |
817 | */ | |
1c79356b A |
818 | pager = (void *)vnode_pager_setup(vp, uip->ui_pager); |
819 | assert(pager); | |
91447636 | 820 | |
2d21ac55 A |
821 | /* |
822 | * Explicitly set the pager into the ubc_info, after setting the | |
823 | * UI_HASPAGER flag. | |
824 | */ | |
91447636 A |
825 | SET(uip->ui_flags, UI_HASPAGER); |
826 | uip->ui_pager = pager; | |
1c79356b A |
827 | |
828 | /* | |
91447636 | 829 | * Note: We cannot use VNOP_GETATTR() to get an accurate |
2d21ac55 A |
830 | * value of ui_size because this may be an NFS vnode, and |
831 | * nfs_getattr() can call vinvalbuf(); if this happens, | |
832 | * ubc_info is not set up to deal with that event. | |
1c79356b A |
833 | * So use bogus size. |
834 | */ | |
835 | ||
1c79356b | 836 | /* |
0b4e3aa0 A |
837 | * create a vnode - vm_object association |
838 | * memory_object_create_named() creates a "named" reference on the | |
839 | * memory object; we hold this reference as long as the vnode is |
840 | * "alive." Since memory_object_create_named() took its own reference | |
841 | * on the vnode pager we passed it, we can drop the reference | |
842 | * vnode_pager_setup() returned here. | |
1c79356b | 843 | */ |
0b4e3aa0 A |
844 | kret = memory_object_create_named(pager, |
845 | (memory_object_size_t)uip->ui_size, &control); | |
846 | vnode_pager_deallocate(pager); | |
847 | if (kret != KERN_SUCCESS) | |
848 | panic("ubc_info_init: memory_object_create_named returned %d", kret); | |
1c79356b | 849 | |
0b4e3aa0 A |
850 | assert(control); |
851 | uip->ui_control = control; /* cache the value of the mo control */ | |
852 | SET(uip->ui_flags, UI_HASOBJREF); /* with a named reference */ | |
2d21ac55 | 853 | |
91447636 | 854 | if (withfsize == 0) { |
91447636 | 855 | /* initialize the size */ |
2d21ac55 | 856 | error = vnode_size(vp, &uip->ui_size, vfs_context_current()); |
91447636 A |
857 | if (error) |
858 | uip->ui_size = 0; | |
859 | } else { | |
860 | uip->ui_size = filesize; | |
861 | } | |
2d21ac55 | 862 | vp->v_lflag |= VNAMED_UBC; /* vnode has a named ubc reference */ |
1c79356b | 863 | |
0b4e3aa0 | 864 | return (error); |
1c79356b A |
865 | } |
866 | ||
2d21ac55 A |
867 | |
868 | /* | |
869 | * ubc_info_free | |
870 | * | |
871 | * Free a ubc_info structure | |
872 | * | |
873 | * Parameters: uip A pointer to the ubc_info to free | |
874 | * | |
875 | * Returns: (void) | |
876 | * | |
877 | * Notes: If there is a credential that has subsequently been associated | |
878 | * with the ubc_info via a call to ubc_setcred(), the reference | |
879 | * to the credential is dropped. | |
880 | * | |
881 | * It's actually impossible for a ubc_info.ui_control to take the | |
882 | * value MEMORY_OBJECT_CONTROL_NULL. | |
883 | */ | |
0b4e3aa0 A |
884 | static void |
885 | ubc_info_free(struct ubc_info *uip) | |
1c79356b | 886 | { |
0c530ab8 A |
887 | if (IS_VALID_CRED(uip->ui_ucred)) { |
888 | kauth_cred_unref(&uip->ui_ucred); | |
1c79356b | 889 | } |
0b4e3aa0 A |
890 | |
891 | if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) | |
892 | memory_object_control_deallocate(uip->ui_control); | |
91447636 A |
893 | |
894 | cluster_release(uip); | |
2d21ac55 | 895 | ubc_cs_free(uip); |
0b4e3aa0 | 896 | |
2d21ac55 | 897 | zfree(ubc_info_zone, uip); |
1c79356b A |
898 | return; |
899 | } | |
900 | ||
2d21ac55 | 901 | |
0b4e3aa0 A |
902 | void |
903 | ubc_info_deallocate(struct ubc_info *uip) | |
904 | { | |
91447636 | 905 | ubc_info_free(uip); |
0b4e3aa0 A |
906 | } |
907 | ||
3e170ce0 | 908 | errno_t mach_to_bsd_errno(kern_return_t mach_err) |
fe8ab488 A |
909 | { |
910 | switch (mach_err) { | |
911 | case KERN_SUCCESS: | |
912 | return 0; | |
913 | ||
914 | case KERN_INVALID_ADDRESS: | |
915 | case KERN_INVALID_ARGUMENT: | |
916 | case KERN_NOT_IN_SET: | |
917 | case KERN_INVALID_NAME: | |
918 | case KERN_INVALID_TASK: | |
919 | case KERN_INVALID_RIGHT: | |
920 | case KERN_INVALID_VALUE: | |
921 | case KERN_INVALID_CAPABILITY: | |
922 | case KERN_INVALID_HOST: | |
923 | case KERN_MEMORY_PRESENT: | |
924 | case KERN_INVALID_PROCESSOR_SET: | |
925 | case KERN_INVALID_POLICY: | |
926 | case KERN_ALREADY_WAITING: | |
927 | case KERN_DEFAULT_SET: | |
928 | case KERN_EXCEPTION_PROTECTED: | |
929 | case KERN_INVALID_LEDGER: | |
930 | case KERN_INVALID_MEMORY_CONTROL: | |
931 | case KERN_INVALID_SECURITY: | |
932 | case KERN_NOT_DEPRESSED: | |
933 | case KERN_LOCK_OWNED: | |
934 | case KERN_LOCK_OWNED_SELF: | |
935 | return EINVAL; | |
936 | ||
937 | case KERN_PROTECTION_FAILURE: | |
938 | case KERN_NOT_RECEIVER: | |
939 | case KERN_NO_ACCESS: | |
940 | case KERN_POLICY_STATIC: | |
941 | return EACCES; | |
942 | ||
943 | case KERN_NO_SPACE: | |
944 | case KERN_RESOURCE_SHORTAGE: | |
945 | case KERN_UREFS_OVERFLOW: | |
946 | case KERN_INVALID_OBJECT: | |
947 | return ENOMEM; | |
948 | ||
949 | case KERN_FAILURE: | |
950 | return EIO; | |
951 | ||
952 | case KERN_MEMORY_FAILURE: | |
953 | case KERN_POLICY_LIMIT: | |
954 | case KERN_CODESIGN_ERROR: | |
955 | return EPERM; | |
956 | ||
957 | case KERN_MEMORY_ERROR: | |
958 | return EBUSY; | |
959 | ||
960 | case KERN_ALREADY_IN_SET: | |
961 | case KERN_NAME_EXISTS: | |
962 | case KERN_RIGHT_EXISTS: | |
963 | return EEXIST; | |
964 | ||
965 | case KERN_ABORTED: | |
966 | return EINTR; | |
967 | ||
968 | case KERN_TERMINATED: | |
969 | case KERN_LOCK_SET_DESTROYED: | |
970 | case KERN_LOCK_UNSTABLE: | |
971 | case KERN_SEMAPHORE_DESTROYED: | |
972 | return ENOENT; | |
973 | ||
974 | case KERN_RPC_SERVER_TERMINATED: | |
975 | return ECONNRESET; | |
976 | ||
977 | case KERN_NOT_SUPPORTED: | |
978 | return ENOTSUP; | |
979 | ||
980 | case KERN_NODE_DOWN: | |
981 | return ENETDOWN; | |
982 | ||
983 | case KERN_NOT_WAITING: | |
984 | return ENOENT; | |
985 | ||
986 | case KERN_OPERATION_TIMED_OUT: | |
987 | return ETIMEDOUT; | |
988 | ||
989 | default: | |
990 | return EIO; | |
991 | } | |
992 | } | |
2d21ac55 | 993 | |
1c79356b | 994 | /* |
fe8ab488 | 995 | * ubc_setsize_ex |
2d21ac55 | 996 | * |
fe8ab488 | 997 | * Tell the VM that the size of the file represented by the vnode has |
2d21ac55 A |
998 | * changed |
999 | * | |
fe8ab488 A |
1000 | * Parameters: vp The vp whose backing file size is |
1001 | * being changed | |
1002 | * nsize The new size of the backing file | |
1003 | * opts Options | |
1004 | * | |
1005 | * Returns: EINVAL for new size < 0 | |
1006 | * ENOENT if no UBC info exists | |
1007 | * EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size | |
1008 | * Other errors (mapped to errno_t) returned by VM functions | |
1009 | * | |
1010 | * Notes: This function will indicate success if the new size is the | |
1011 | * same or larger than the old size (in this case, the | |
1012 | * remainder of the file will require modification or use of | |
1013 | * an existing upl to access successfully). | |
1014 | * | |
1015 | * This function will fail if the new file size is smaller and |
1016 | * the memory region being invalidated could not actually be |
1017 | * invalidated, or if the last page could not be flushed when |
1018 | * the new size is not aligned to a page boundary. This is |
1019 | * usually indicative of an I/O error. |
1c79356b | 1020 | */ |
fe8ab488 | 1021 | errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts) |
1c79356b A |
1022 | { |
1023 | off_t osize; /* ui_size before change */ | |
1024 | off_t lastpg, olastpgend, lastoff; | |
1025 | struct ubc_info *uip; | |
0b4e3aa0 | 1026 | memory_object_control_t control; |
2d21ac55 | 1027 | kern_return_t kret = KERN_SUCCESS; |
1c79356b | 1028 | |
55e303ae | 1029 | if (nsize < (off_t)0) |
fe8ab488 | 1030 | return EINVAL; |
1c79356b | 1031 | |
1c79356b | 1032 | if (!UBCINFOEXISTS(vp)) |
fe8ab488 | 1033 | return ENOENT; |
1c79356b A |
1034 | |
1035 | uip = vp->v_ubcinfo; | |
2d21ac55 | 1036 | osize = uip->ui_size; |
fe8ab488 A |
1037 | |
1038 | if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) | |
1039 | return EAGAIN; | |
1040 | ||
2d21ac55 A |
1041 | /* |
1042 | * Update the size before flushing the VM | |
1043 | */ | |
1c79356b A |
1044 | uip->ui_size = nsize; |
1045 | ||
b0d623f7 | 1046 | if (nsize >= osize) { /* Nothing more to do */ |
6d2010ae A |
1047 | if (nsize > osize) { |
1048 | lock_vnode_and_post(vp, NOTE_EXTEND); | |
1049 | } | |
1050 | ||
fe8ab488 | 1051 | return 0; |
b0d623f7 | 1052 | } |
1c79356b A |
1053 | |
1054 | /* | |
1055 | * When the file shrinks, invalidate the pages beyond the | |
1056 | * new size. Also get rid of garbage beyond nsize on the | |
2d21ac55 A |
1057 | * last page. The ui_size already has the nsize, so any |
1058 | * subsequent page-in will zero-fill the tail properly | |
1c79356b | 1059 | */ |
1c79356b A |
1060 | lastpg = trunc_page_64(nsize); |
1061 | olastpgend = round_page_64(osize); | |
0b4e3aa0 A |
1062 | control = uip->ui_control; |
1063 | assert(control); | |
1c79356b A |
1064 | lastoff = (nsize & PAGE_MASK_64); |
1065 | ||
2d21ac55 | 1066 | if (lastoff) { |
fe8ab488 | 1067 | upl_t upl; |
2d21ac55 A |
1068 | upl_page_info_t *pl; |
1069 | ||
fe8ab488 | 1070 | /* |
2d21ac55 | 1071 | * new EOF ends up in the middle of a page |
fe8ab488 | 1072 | * zero the tail of this page if it's currently |
2d21ac55 A |
1073 | * present in the cache |
1074 | */ | |
fe8ab488 A |
1075 | kret = ubc_create_upl(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE); |
1076 | ||
1c79356b | 1077 | if (kret != KERN_SUCCESS) |
2d21ac55 A |
1078 | panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret); |
1079 | ||
1080 | if (upl_valid_page(pl, 0)) | |
1081 | cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL); | |
1082 | ||
1083 | ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); | |
1c79356b | 1084 | |
2d21ac55 A |
1085 | lastpg += PAGE_SIZE_64; |
1086 | } | |
1087 | if (olastpgend > lastpg) { | |
b0d623f7 A |
1088 | int flags; |
1089 | ||
1090 | if (lastpg == 0) | |
1091 | flags = MEMORY_OBJECT_DATA_FLUSH_ALL; | |
1092 | else | |
1093 | flags = MEMORY_OBJECT_DATA_FLUSH; | |
fe8ab488 | 1094 | /* |
2d21ac55 A |
1095 | * invalidate the pages beyond the new EOF page |
1096 | * | |
1097 | */ | |
fe8ab488 A |
1098 | kret = memory_object_lock_request(control, |
1099 | (memory_object_offset_t)lastpg, | |
1100 | (memory_object_size_t)(olastpgend - lastpg), NULL, NULL, | |
1101 | MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE); | |
2d21ac55 A |
1102 | if (kret != KERN_SUCCESS) |
1103 | printf("ubc_setsize: invalidate failed (error = %d)\n", kret); | |
1104 | } | |
fe8ab488 | 1105 | return mach_to_bsd_errno(kret); |
1c79356b A |
1106 | } |
1107 | ||
fe8ab488 A |
1108 | // Returns true for success |
1109 | int ubc_setsize(vnode_t vp, off_t nsize) | |
1110 | { | |
1111 | return ubc_setsize_ex(vp, nsize, 0) == 0; | |
1112 | } | |
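Note the two return conventions side by side: ubc_setsize_ex() returns 0 or an errno, while ubc_setsize() returns 1 on success. A standalone sketch with local stand-in functions (they model only the conventions, not any real VM behaviour):

#include <assert.h>
#include <errno.h>

/* Local stand-ins that mimic only the return conventions of the two KPIs:
 * the _ex variant returns 0 or an errno, the plain variant returns 1 on
 * success and 0 on failure. */
static int
fake_ubc_setsize_ex(long long nsize)
{
	return nsize < 0 ? EINVAL : 0;
}

static int
fake_ubc_setsize(long long nsize)
{
	return fake_ubc_setsize_ex(nsize) == 0;
}

int
main(void)
{
	assert(fake_ubc_setsize_ex(4096) == 0);		/* errno style: 0 means success */
	assert(fake_ubc_setsize_ex(-1) == EINVAL);
	assert(fake_ubc_setsize(4096) == 1);		/* boolean style: 1 means success */
	assert(fake_ubc_setsize(-1) == 0);
	return 0;
}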
2d21ac55 | 1113 | |
1c79356b | 1114 | /* |
2d21ac55 A |
1115 | * ubc_getsize |
1116 | * | |
1117 | * Get the size of the file associated with the specified vnode |
1118 | * | |
1119 | * Parameters: vp The vnode whose size is of interest | |
1120 | * | |
1121 | * Returns: 0 There is no ubc_info associated with | |
1122 | * this vnode, or the size is zero | |
1123 | * !0 The size of the file | |
1124 | * | |
1125 | * Notes: Using this routine, it is not possible for a caller to | |
1126 | * successfully distinguish between a vnode associated with a zero |
1127 | * length file, and a vnode with no associated ubc_info. The | |
1128 | * caller therefore needs to not care, or needs to ensure that | |
1129 | * they have previously successfully called ubc_info_init() or | |
1130 | * ubc_info_init_withsize(). | |
1c79356b A |
1131 | */ |
1132 | off_t | |
1133 | ubc_getsize(struct vnode *vp) | |
1134 | { | |
91447636 A |
1135 | /* people depend on the side effect of this working this way |
1136 | * as they call this for directories |
1c79356b | 1137 | */ |
91447636 A |
1138 | if (!UBCINFOEXISTS(vp)) |
1139 | return ((off_t)0); | |
1140 | return (vp->v_ubcinfo->ui_size); | |
1c79356b A |
1141 | } |
1142 | ||
2d21ac55 | 1143 | |
1c79356b | 1144 | /* |
2d21ac55 A |
1145 | * ubc_umount |
1146 | * | |
fe8ab488 | 1147 | * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this |
2d21ac55 A |
1148 | * mount point |
1149 | * | |
1150 | * Parameters: mp The mount point | |
1151 | * | |
1152 | * Returns: 0 Success | |
1153 | * | |
1154 | * Notes: There is no failure indication for this function. | |
1155 | * | |
1156 | * This function is used in the unmount path; since it may block | |
1157 | * I/O indefinitely, it should not be used in the forced unmount | |
1158 | * path, since a device unavailability could also block that | |
1159 | * indefinitely. | |
1160 | * | |
1161 | * Because there is no device ejection interlock on USB, FireWire, | |
1162 | * or similar devices, it's possible that an ejection that begins | |
1163 | * subsequent to the vnode_iterate() completing, either on one of | |
1164 | * those devices, or a network mount for which the server quits | |
1165 | * responding, etc., may cause the caller to block indefinitely. | |
1c79356b | 1166 | */ |
0b4e3aa0 | 1167 | __private_extern__ int |
1c79356b A |
1168 | ubc_umount(struct mount *mp) |
1169 | { | |
91447636 A |
1170 | vnode_iterate(mp, 0, ubc_umcallback, 0); |
1171 | return(0); | |
1c79356b A |
1172 | } |
1173 | ||
2d21ac55 A |
1174 | |
1175 | /* | |
1176 | * ubc_umcallback | |
1177 | * | |
1178 | * Used by ubc_umount() as an internal implementation detail; see ubc_umount() | |
1179 | * and vnode_iterate() for details of implementation. | |
1180 | */ | |
91447636 A |
1181 | static int |
1182 | ubc_umcallback(vnode_t vp, __unused void * args) | |
1c79356b | 1183 | { |
1c79356b | 1184 | |
91447636 A |
1185 | if (UBCINFOEXISTS(vp)) { |
1186 | ||
91447636 | 1187 | (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL); |
1c79356b | 1188 | } |
91447636 | 1189 | return (VNODE_RETURNED); |
1c79356b A |
1190 | } |
1191 | ||
91447636 | 1192 | |
2d21ac55 A |
1193 | /* |
1194 | * ubc_getcred | |
1195 | * | |
1196 | * Get the credentials currently active for the ubc_info associated with the | |
1197 | * vnode. | |
1198 | * | |
1199 | * Parameters: vp The vnode whose ubc_info credentials | |
1200 | * are to be retrieved | |
1201 | * | |
1202 | * Returns: !NOCRED The credentials | |
1203 | * NOCRED If there is no ubc_info for the vnode, | |
1204 | * or if there is one, but it has not had | |
1205 | * any credentials associated with it via | |
1206 | * a call to ubc_setcred() | |
1207 | */ | |
91447636 | 1208 | kauth_cred_t |
1c79356b A |
1209 | ubc_getcred(struct vnode *vp) |
1210 | { | |
91447636 A |
1211 | if (UBCINFOEXISTS(vp)) |
1212 | return (vp->v_ubcinfo->ui_ucred); | |
1c79356b | 1213 | |
91447636 | 1214 | return (NOCRED); |
1c79356b A |
1215 | } |
1216 | ||
2d21ac55 A |
1217 | |
1218 | /* | |
1219 | * ubc_setthreadcred | |
1220 | * | |
1221 | * If they are not already set, set the credentials of the ubc_info structure | |
1222 | * associated with the vnode to those of the supplied thread; otherwise leave | |
1223 | * them alone. | |
1224 | * | |
1225 | * Parameters: vp The vnode whose ubc_info creds are to | |
1226 | * be set | |
1227 | * p The process whose credentials are to | |
1228 | * be used, if not running on an assumed | |
1229 | * credential | |
1230 | * thread The thread whose credentials are to | |
1231 | * be used | |
1232 | * | |
1233 | * Returns: 1 This vnode has no associated ubc_info | |
1234 | * 0 Success | |
1235 | * | |
1236 | * Notes: This function takes a proc parameter to account for bootstrap | |
1237 | * issues where a task or thread may call this routine, either | |
1238 | * before credentials have been initialized by bsd_init(), or if | |
1239 | * there is no BSD info associated with a mach thread yet. This |
1240 | * is known to happen in both the initial swap and memory mapping | |
1241 | * calls. | |
1242 | * | |
1243 | * This function is generally used only in the following cases: | |
1244 | * | |
1245 | * o a memory mapped file via the mmap() system call | |
2d21ac55 A |
1246 | * o a swap store backing file |
1247 | * o subsequent to a successful write via vn_write() | |
1248 | * | |
1249 | * The information is then used by the NFS client in order to | |
1250 | * cons up a wire message in either the page-in or page-out path. | |
1251 | * | |
1252 | * There are two potential problems with the use of this API: | |
1253 | * | |
1254 | * o Because the write path only set it on a successful | |
1255 | * write, there is a race window between setting the | |
1256 | * credential and its use to evict the pages to the | |
1257 | * remote file server | |
1258 | * | |
1259 | * o Because a page-in may occur prior to a write, the | |
1260 | * credential may not be set at this time, if the page-in | |
fe8ab488 | 1261 | * is not the result of a mapping established via mmap(). |
2d21ac55 A |
1262 | * |
1263 | * In both these cases, this will be triggered from the paging | |
1264 | * path, which will instead use the credential of the current | |
1265 | * process, which in this case is either the dynamic_pager or | |
1266 | * the kernel task, both of which utilize "root" credentials. | |
1267 | * | |
1268 | * This may potentially permit operations to occur which should | |
1269 | * be denied, or it may cause to be denied operations which | |
1270 | * should be permitted, depending on the configuration of the NFS | |
1271 | * server. | |
1272 | */ | |
13fec989 | 1273 | int |
2d21ac55 | 1274 | ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread) |
13fec989 A |
1275 | { |
1276 | struct ubc_info *uip; | |
1277 | kauth_cred_t credp; | |
2d21ac55 | 1278 | struct uthread *uthread = get_bsdthread_info(thread); |
13fec989 A |
1279 | |
1280 | if (!UBCINFOEXISTS(vp)) | |
2d21ac55 | 1281 | return (1); |
13fec989 A |
1282 | |
1283 | vnode_lock(vp); | |
1284 | ||
1285 | uip = vp->v_ubcinfo; | |
1286 | credp = uip->ui_ucred; | |
1287 | ||
0c530ab8 | 1288 | if (!IS_VALID_CRED(credp)) { |
13fec989 A |
1289 | /* use per-thread cred, if assumed identity, else proc cred */ |
1290 | if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) { | |
1291 | uip->ui_ucred = kauth_cred_proc_ref(p); | |
1292 | } else { | |
1293 | uip->ui_ucred = uthread->uu_ucred; | |
1294 | kauth_cred_ref(uip->ui_ucred); | |
1295 | } | |
2d21ac55 | 1296 | } |
13fec989 A |
1297 | vnode_unlock(vp); |
1298 | ||
1299 | return (0); | |
1300 | } | |
1301 | ||
2d21ac55 | 1302 | |
1c79356b | 1303 | /* |
2d21ac55 A |
1304 | * ubc_setcred |
1305 | * | |
1306 | * If they are not already set, set the credentials of the ubc_info structure | |
1307 | * associated with the vnode to those of the process; otherwise leave them | |
1308 | * alone. | |
1309 | * | |
1310 | * Parameters: vp The vnode whose ubc_info creds are to | |
1311 | * be set | |
1312 | * p The process whose credentials are to | |
1313 | * be used | |
1314 | * | |
1315 | * Returns: 0 This vnode has no associated ubc_info | |
1316 | * 1 Success | |
1317 | * | |
1318 | * Notes: The return values for this function are inverted from nearly | |
1319 | * all other uses in the kernel. | |
1320 | * | |
1321 | * See also ubc_setthreadcred(), above. | |
1322 | * | |
1323 | * This function is considered deprecated, and generally should | |
1324 | * not be used, as it is incompatible with per-thread credentials; | |
1325 | * it exists for legacy KPI reasons. | |
1326 | * | |
1327 | * DEPRECATION: ubc_setcred() is being deprecated. Please use | |
1328 | * ubc_setthreadcred() instead. | |
1c79356b | 1329 | */ |
1c79356b | 1330 | int |
2d21ac55 | 1331 | ubc_setcred(struct vnode *vp, proc_t p) |
1c79356b A |
1332 | { |
1333 | struct ubc_info *uip; | |
91447636 | 1334 | kauth_cred_t credp; |
1c79356b | 1335 | |
2d21ac55 A |
1336 | /* If there is no ubc_info, deny the operation */ |
1337 | if ( !UBCINFOEXISTS(vp)) | |
1c79356b | 1338 | return (0); |
1c79356b | 1339 | |
2d21ac55 A |
1340 | /* |
1341 | * Check to see if there is already a credential reference in the | |
1342 | * ubc_info; if there is not, take one on the supplied credential. | |
1343 | */ | |
91447636 | 1344 | vnode_lock(vp); |
91447636 | 1345 | uip = vp->v_ubcinfo; |
1c79356b | 1346 | credp = uip->ui_ucred; |
0c530ab8 | 1347 | if (!IS_VALID_CRED(credp)) { |
91447636 | 1348 | uip->ui_ucred = kauth_cred_proc_ref(p); |
1c79356b | 1349 | } |
91447636 | 1350 | vnode_unlock(vp); |
1c79356b A |
1351 | |
1352 | return (1); | |
1353 | } | |
1354 | ||
2d21ac55 A |
1355 | /* |
1356 | * ubc_getpager | |
1357 | * | |
1358 | * Get the pager associated with the ubc_info associated with the vnode. | |
1359 | * | |
1360 | * Parameters: vp The vnode to obtain the pager from | |
1361 | * | |
1362 | * Returns: !VNODE_PAGER_NULL The memory_object_t for the pager | |
1363 | * VNODE_PAGER_NULL There is no ubc_info for this vnode | |
1364 | * | |
1365 | * Notes: For each vnode that has a ubc_info associated with it, that | |
1366 | * ubc_info SHALL have a pager associated with it, so in the | |
1367 | * normal case, it's impossible to return VNODE_PAGER_NULL for | |
1368 | * a vnode with an associated ubc_info. | |
1369 | */ | |
0b4e3aa0 | 1370 | __private_extern__ memory_object_t |
1c79356b A |
1371 | ubc_getpager(struct vnode *vp) |
1372 | { | |
91447636 A |
1373 | if (UBCINFOEXISTS(vp)) |
1374 | return (vp->v_ubcinfo->ui_pager); | |
1c79356b | 1375 | |
91447636 | 1376 | return (0); |
1c79356b A |
1377 | } |
1378 | ||
2d21ac55 | 1379 | |
1c79356b | 1380 | /* |
2d21ac55 A |
1381 | * ubc_getobject |
1382 | * | |
1383 | * Get the memory object control associated with the ubc_info associated with | |
1384 | * the vnode | |
1385 | * | |
1386 | * Parameters: vp The vnode to obtain the memory object | |
1387 | * from | |
1388 | * flags DEPRECATED | |
1389 | * | |
1390 | * Returns: !MEMORY_OBJECT_CONTROL_NULL | |
1391 | * MEMORY_OBJECT_CONTROL_NULL | |
1392 | * | |
1393 | * Notes: Historically, if the flags were not "do not reactivate", this | |
1394 | * function would look up the memory object using the pager if | |
1395 | * it did not exist (this could be the case if the vnode had | |
1396 | * been previously reactivated). The flags would also permit a | |
1397 | * hold to be requested, which would have created an object | |
1398 | * reference, if one had not already existed. This usage is | |
1399 | * deprecated, as it would permit a race between finding and | |
1400 | * taking the reference vs. a single reference being dropped in | |
1401 | * another thread. | |
1c79356b | 1402 | */ |
0b4e3aa0 | 1403 | memory_object_control_t |
91447636 | 1404 | ubc_getobject(struct vnode *vp, __unused int flags) |
1c79356b | 1405 | { |
91447636 A |
1406 | if (UBCINFOEXISTS(vp)) |
1407 | return((vp->v_ubcinfo->ui_control)); | |
1c79356b | 1408 | |
2d21ac55 | 1409 | return (MEMORY_OBJECT_CONTROL_NULL); |
1c79356b A |
1410 | } |
1411 | ||
6d2010ae A |
1412 | boolean_t |
1413 | ubc_strict_uncached_IO(struct vnode *vp) | |
1414 | { | |
1415 | boolean_t result = FALSE; | |
1416 | ||
1417 | if (UBCINFOEXISTS(vp)) { | |
1418 | result = memory_object_is_slid(vp->v_ubcinfo->ui_control); | |
1419 | } | |
1420 | return result; | |
1421 | } | |
1c79356b | 1422 | |
2d21ac55 A |
1423 | /* |
1424 | * ubc_blktooff | |
1425 | * | |
1426 | * Convert a given block number to a memory backing object (file) offset for a | |
1427 | * given vnode | |
1428 | * | |
1429 | * Parameters: vp The vnode in which the block is located | |
1430 | * blkno The block number to convert | |
1431 | * | |
1432 | * Returns: !-1 The offset into the backing object | |
1433 | * -1 There is no ubc_info associated with | |
1434 | * the vnode | |
1435 | * -1 An error occurred in the underlying VFS | |
1436 | * while translating the block to an | |
1437 | * offset; the most likely cause is that | |
1438 | * the caller specified a block past the | |
1439 | * end of the file, but this could also be | |
1440 | * any other error from VNOP_BLKTOOFF(). | |
1441 | * | |
1442 | * Note: Representing the error in band loses some information, but does | |
1443 | * not occlude a valid offset, since an off_t of -1 is normally | |
1444 | * used to represent EOF. If we had a more reliable constant in | |
1445 | * our header files for it (i.e. explicitly cast to an off_t), we | |
1446 | * would use it here instead. | |
1447 | */ | |
1c79356b | 1448 | off_t |
91447636 | 1449 | ubc_blktooff(vnode_t vp, daddr64_t blkno) |
1c79356b | 1450 | { |
2d21ac55 | 1451 | off_t file_offset = -1; |
1c79356b A |
1452 | int error; |
1453 | ||
2d21ac55 A |
1454 | if (UBCINFOEXISTS(vp)) { |
1455 | error = VNOP_BLKTOOFF(vp, blkno, &file_offset); | |
1456 | if (error) | |
1457 | file_offset = -1; | |
1458 | } | |
1c79356b A |
1459 | |
1460 | return (file_offset); | |
1461 | } | |
0b4e3aa0 | 1462 | |
2d21ac55 A |
1463 | |
1464 | /* | |
1465 | * ubc_offtoblk | |
1466 | * | |
1467 | * Convert a given offset in a memory backing object into a block number for a | |
1468 | * given vnode | |
1469 | * | |
1470 | * Parameters: vp The vnode in which the offset is | |
1471 | * located | |
1472 | * offset The offset into the backing object | |
1473 | * | |
1474 | * Returns: !-1 The returned block number | |
1475 | * -1 There is no ubc_info associated with | |
1476 | * the vnode | |
1477 | * -1 An error occurred in the underlying VFS | |
1478 | * while translating the offset to a |
1479 | * block; the most likely cause is that |
1480 | * the caller specified an offset past the |
1481 | * end of the file, but this could also be |
1482 | * any other error from VNOP_OFFTOBLK(). | |
1483 | * | |
1484 | * Note: Representing the error in band loses some information, but does | |
1485 | * not occlude a valid block number, since block numbers exceed | |
1486 | * the valid range for offsets, due to their relative sizes. If | |
1487 | * we had a more reliable constant than -1 in our header files | |
1488 | * for it (i.e. explicitly cast to a daddr64_t), we would use it | |
1489 | * here instead. | |
1490 | */ | |
91447636 A |
1491 | daddr64_t |
1492 | ubc_offtoblk(vnode_t vp, off_t offset) | |
1c79356b | 1493 | { |
2d21ac55 | 1494 | daddr64_t blkno = -1; |
0b4e3aa0 | 1495 | int error = 0; |
1c79356b | 1496 | |
2d21ac55 A |
1497 | if (UBCINFOEXISTS(vp)) { |
1498 | error = VNOP_OFFTOBLK(vp, offset, &blkno); | |
1499 | if (error) | |
1500 | blkno = -1; | |
1501 | } | |
1c79356b A |
1502 | |
1503 | return (blkno); | |
1504 | } | |
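/*
 * Illustrative usage sketch (added for exposition; not part of the
 * original source): the in-band -1 error convention documented above
 * for ubc_blktooff() and ubc_offtoblk().  The vnode `vp' and the block
 * number are assumptions for the example only.
 *
 *	daddr64_t blkno = 10;
 *	off_t     f_offset;
 *
 *	f_offset = ubc_blktooff(vp, blkno);
 *	if (f_offset == -1)
 *		return (EINVAL);
 *
 *	if (ubc_offtoblk(vp, f_offset) == -1)
 *		return (EINVAL);
 */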
1505 | ||
2d21ac55 A |
1506 | |
1507 | /* | |
1508 | * ubc_pages_resident | |
1509 | * | |
1510 | * Determine whether or not a given vnode has pages resident via the memory | |
1511 | * object control associated with the ubc_info associated with the vnode | |
1512 | * | |
1513 | * Parameters: vp The vnode we want to know about | |
1514 | * | |
1515 | * Returns: 1 Yes | |
1516 | * 0 No | |
1517 | */ | |
1c79356b | 1518 | int |
91447636 | 1519 | ubc_pages_resident(vnode_t vp) |
1c79356b | 1520 | { |
91447636 A |
1521 | kern_return_t kret; |
1522 | boolean_t has_pages_resident; | |
1523 | ||
2d21ac55 | 1524 | if (!UBCINFOEXISTS(vp)) |
0b4e3aa0 | 1525 | return (0); |
91447636 | 1526 | |
2d21ac55 A |
1527 | /* |
1528 | * The following call may fail if an invalid ui_control is specified, | |
1529 | * or if there is no VM object associated with the control object. In | |
1530 | * either case, reacting to it as if there were no pages resident will | |
1531 | * result in correct behavior. | |
1532 | */ | |
91447636 A |
1533 | kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident); |
1534 | ||
1535 | if (kret != KERN_SUCCESS) | |
0b4e3aa0 | 1536 | return (0); |
91447636 A |
1537 | |
1538 | if (has_pages_resident == TRUE) | |
1539 | return (1); | |
1540 | ||
1541 | return (0); | |
1542 | } | |
1c79356b | 1543 | |
0b4e3aa0 | 1544 | /* |
2d21ac55 A |
1545 | * ubc_msync |
1546 | * | |
1547 | * Clean and/or invalidate a range in the memory object that backs this vnode | |
1548 | * | |
1549 | * Parameters: vp The vnode whose associated ubc_info's | |
1550 | * associated memory object is to have a | |
1551 | * range invalidated within it | |
1552 | * beg_off The start of the range, as an offset | |
1553 | * end_off The end of the range, as an offset | |
1554 | * resid_off The address of an off_t supplied by the | |
1555 | * caller; may be set to NULL to ignore | |
1556 | * flags See ubc_msync_internal() | |
1557 | * | |
1558 | * Returns: 0 Success | |
1559 | * !0 Failure; an errno is returned | |
1560 | * | |
1561 | * Implicit Returns: | |
1562 | * *resid_off, modified If non-NULL, the contents are ALWAYS | |
1563 | * modified; they are initialized to the | |
1564 | * beg_off, and in case of an I/O error, | |
1565 | * the difference between beg_off and the | |
1566 | * current value will reflect what was | |
1567 | * able to be written before the error | |
1568 | * occurred. If no error is returned, the | |
1569 | * value of the resid_off is undefined; do | |
1570 | * NOT use it in place of end_off if you | |
1571 | * intend to increment from the end of the | |
1572 | * last call and call iteratively. | |
1573 | * | |
1574 | * Notes: see ubc_msync_internal() for more detailed information. | |
1575 | * | |
0b4e3aa0 | 1576 | */ |
91447636 A |
1577 | errno_t |
1578 | ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags) | |
0b4e3aa0 | 1579 | { |
91447636 A |
1580 | int retval; |
1581 | int io_errno = 0; | |
1582 | ||
1583 | if (resid_off) | |
1584 | *resid_off = beg_off; | |
0b4e3aa0 | 1585 | |
91447636 | 1586 | retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno); |
0b4e3aa0 | 1587 | |
91447636 A |
1588 | if (retval == 0 && io_errno == 0) |
1589 | return (EINVAL); | |
1590 | return (io_errno); | |
1591 | } | |
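/*
 * Illustrative sketch (exposition only; not part of the original source):
 * a filesystem pushing all dirty pages of a file synchronously before a
 * metadata operation, using ubc_msync() as documented above.  `vp' is an
 * assumption for the example, and ubc_getsize() is used for the end
 * offset on the assumption that the whole file should be covered.
 *
 *	off_t	resid = 0;
 *	int	error;
 *
 *	error = ubc_msync(vp, (off_t)0, ubc_getsize(vp), &resid,
 *	    UBC_PUSHDIRTY | UBC_SYNC);
 *	if (error)
 *		printf("ubc_msync stopped near offset %lld: error %d\n",
 *		    resid, error);
 */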
0b4e3aa0 | 1592 | |
1c79356b | 1593 | |
1c79356b | 1594 | /* |
fe8ab488 A |
1595 | * ubc_msync_internal |
1596 | * | |
2d21ac55 A |
1597 | * Clean and/or invalidate a range in the memory object that backs this vnode |
1598 | * | |
1599 | * Parameters: vp The vnode whose associated ubc_info's | |
1600 | * associated memory object is to have a | |
1601 | * range invalidated within it | |
1602 | * beg_off The start of the range, as an offset | |
1603 | * end_off The end of the range, as an offset | |
1604 | * resid_off The address of an off_t supplied by the | |
1605 | * caller; may be set to NULL to ignore | |
1606 | * flags MUST contain at least one of the flags | |
1607 | * UBC_INVALIDATE, UBC_PUSHDIRTY, or | |
1608 | * UBC_PUSHALL; if UBC_PUSHDIRTY is used, | |
1609 | * UBC_SYNC may also be specified to cause | |
1610 | * this function to block until the | |
1611 | * operation is complete. The behavior | |
1612 | * of UBC_SYNC is otherwise undefined. | |
1613 | * io_errno The address of an int to contain the | |
1614 | * errno from a failed I/O operation, if | |
1615 | * one occurs; may be set to NULL to | |
1616 | * ignore | |
1617 | * | |
1618 | * Returns: 1 Success | |
1619 | * 0 Failure | |
1620 | * | |
1621 | * Implicit Returns: | |
1622 | * *resid_off, modified The contents of this offset MAY be | |
1623 | * modified; in case of an I/O error, the | |
1624 | * difference between beg_off and the | |
1625 | * current value will reflect what was | |
1626 | * able to be written before the error | |
1627 | * occurred. | |
1628 | * *io_errno, modified The contents of this offset are set to | |
1629 | * an errno, if an error occurs; if the | |
1630 | * caller supplies an io_errno parameter, | |
1631 | * they should be careful to initialize it | |
1632 | * to 0 before calling this function to | |
1633 | * enable them to distinguish an error | |
1634 | * with a valid *resid_off from an invalid | |
1635 | * one, and to avoid potentially falsely | |
1636 | * reporting an error, depending on use. | |
1637 | * | |
1638 | * Notes: If there is no ubc_info associated with the vnode supplied, | |
1639 | * this function immediately returns success. | |
1640 | * | |
1641 | * If the value of end_off is less than or equal to beg_off, this | |
1642 | * function immediately returns success; that is, end_off is NOT | |
1643 | * inclusive. | |
1644 | * | |
1645 | * IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or | |
1646 | * UBC_PUSHALL MUST be specified; that is, it is NOT possible to | |
1647 | * attempt to block on in-progress I/O by calling this function | |
1648 | * with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC | |
1649 | * in order to block pending on the I/O already in progress. | |
1650 | * | |
1651 | * The start offset is truncated to the page boundary and the | |
1652 | * size is adjusted to include the last page in the range; that | |
1653 | * is, end_off on exactly a page boundary will not change if it | |
1654 | * is rounded, and the range of bytes written will be from the | |
1655 | * truncated beg_off to the rounded (end_off - 1). | |
1c79356b | 1656 | */ |
91447636 A |
1657 | static int |
1658 | ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno) | |
1c79356b | 1659 | { |
91447636 A |
1660 | memory_object_size_t tsize; |
1661 | kern_return_t kret; | |
1662 | int request_flags = 0; | |
1663 | int flush_flags = MEMORY_OBJECT_RETURN_NONE; | |
1664 | ||
1665 | if ( !UBCINFOEXISTS(vp)) | |
1666 | return (0); | |
91447636 A |
1667 | if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) |
1668 | return (0); | |
2d21ac55 A |
1669 | if (end_off <= beg_off) |
1670 | return (1); | |
91447636 A |
1671 | |
1672 | if (flags & UBC_INVALIDATE) | |
1673 | /* | |
1674 | * discard the resident pages | |
1675 | */ | |
1676 | request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE); | |
1c79356b | 1677 | |
91447636 A |
1678 | if (flags & UBC_SYNC) |
1679 | /* | |
1680 | * wait for all the I/O to complete before returning | |
55e303ae | 1681 | */ |
91447636 | 1682 | request_flags |= MEMORY_OBJECT_IO_SYNC; |
55e303ae | 1683 | |
91447636 A |
1684 | if (flags & UBC_PUSHDIRTY) |
1685 | /* | |
1686 | * we only return the dirty pages in the range | |
1687 | */ | |
1688 | flush_flags = MEMORY_OBJECT_RETURN_DIRTY; | |
0b4e3aa0 | 1689 | |
91447636 A |
1690 | if (flags & UBC_PUSHALL) |
1691 | /* | |
2d21ac55 A |
1692 | * then return all the interesting pages in the range (both |
1693 | * dirty and precious) to the pager | |
91447636 A |
1694 | */ |
1695 | flush_flags = MEMORY_OBJECT_RETURN_ALL; | |
0b4e3aa0 | 1696 | |
91447636 A |
1697 | beg_off = trunc_page_64(beg_off); |
1698 | end_off = round_page_64(end_off); | |
1699 | tsize = (memory_object_size_t)end_off - beg_off; | |
b4c24cb9 | 1700 | |
91447636 A |
1701 | /* flush and/or invalidate pages in the range requested */ |
1702 | kret = memory_object_lock_request(vp->v_ubcinfo->ui_control, | |
2d21ac55 A |
1703 | beg_off, tsize, |
1704 | (memory_object_offset_t *)resid_off, | |
1705 | io_errno, flush_flags, request_flags, | |
1706 | VM_PROT_NO_CHANGE); | |
91447636 A |
1707 | |
1708 | return ((kret == KERN_SUCCESS) ? 1 : 0); | |
1c79356b A |
1709 | } |
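/*
 * Worked example of the truncation/rounding described in the block
 * comment above (illustrative numbers only; assumes a 4KiB page size):
 * a request with beg_off = 0x1800 and end_off = 0x3001 is widened to
 * trunc_page_64(0x1800) == 0x1000 and round_page_64(0x3001) == 0x4000,
 * i.e. tsize == 0x3000, so three whole pages are pushed and/or
 * invalidated even though the caller named only part of each.
 */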
1710 | ||
1c79356b A |
1711 | |
1712 | /* | |
fe8ab488 | 1713 | * ubc_map |
2d21ac55 A |
1714 | * |
1715 | * Explicitly map a vnode that has an associated ubc_info, and add a reference | |
1716 | * to it for the ubc system, if there isn't one already, so it will not be | |
1717 | * recycled while it's in use, and set flags on the ubc_info to indicate that | |
1718 | * we have done this | |
1719 | * | |
1720 | * Parameters: vp The vnode to map | |
1721 | * flags The mapping flags for the vnode; this | |
1722 | * will be a combination of one or more of | |
1723 | * PROT_READ, PROT_WRITE, and PROT_EXEC | |
1724 | * | |
1725 | * Returns: 0 Success | |
1726 | * EPERM Permission was denied | |
1727 | * | |
1728 | * Notes: An I/O reference on the vnode must already be held on entry | |
1729 | * | |
1730 | * If there is no ubc_info associated with the vnode, this function | |
1731 | * will return success. | |
1732 | * | |
1733 | * If a permission error occurs, this function will return | |
1734 | * failure; all other failures will cause this function to return | |
1735 | * success. | |
1736 | * | |
1737 | * IMPORTANT: This is an internal use function, and its symbols | |
1738 | * are not exported, hence its error checking is not very robust. | |
1739 | * It is primarily used by: | |
1740 | * | |
1741 | * o mmap(), when mapping a file | |
2d21ac55 A |
1742 | * o When mapping a shared file (a shared library in the |
1743 | * shared segment region) | |
1744 | * o When loading a program image during the exec process | |
1745 | * | |
1746 | * ...all of these uses ignore the return code, and any fault that | |
1747 | * results later because of a failure is handled in the fix-up path | |
1748 | * of the fault handler. The interface exists primarily as a | |
1749 | * performance hint. | |
1750 | * | |
1751 | * Given that third party implementations of the type of interfaces | |
1752 | * that would use this function, such as alternative executable | |
1753 | * formats, etc., are unsupported, this function is not exported | |
1754 | * for general use. | |
1755 | * | |
1756 | * The extra reference is held until the VM system unmaps the | |
1757 | * vnode from its own context to maintain a vnode reference in | |
1758 | * cases like open()/mmap()/close(), which leave the backing | |
1759 | * object referenced by a mapped memory region in a process | |
1760 | * address space. | |
1c79356b | 1761 | */ |
91447636 A |
1762 | __private_extern__ int |
1763 | ubc_map(vnode_t vp, int flags) | |
1c79356b A |
1764 | { |
1765 | struct ubc_info *uip; | |
91447636 A |
1766 | int error = 0; |
1767 | int need_ref = 0; | |
2d21ac55 | 1768 | int need_wakeup = 0; |
1c79356b | 1769 | |
91447636 | 1770 | if (UBCINFOEXISTS(vp)) { |
1c79356b | 1771 | |
2d21ac55 A |
1772 | vnode_lock(vp); |
1773 | uip = vp->v_ubcinfo; | |
1774 | ||
1775 | while (ISSET(uip->ui_flags, UI_MAPBUSY)) { | |
1776 | SET(uip->ui_flags, UI_MAPWAITING); | |
1777 | (void) msleep(&uip->ui_flags, &vp->v_lock, | |
1778 | PRIBIO, "ubc_map", NULL); | |
1779 | } | |
1780 | SET(uip->ui_flags, UI_MAPBUSY); | |
1781 | vnode_unlock(vp); | |
1782 | ||
1783 | error = VNOP_MMAP(vp, flags, vfs_context_current()); | |
1c79356b | 1784 | |
39037602 A |
1785 | /* |
1786 | * rdar://problem/22587101 required that we stop propagating | |
1787 | * EPERM up the stack. Otherwise, we would have to funnel up | |
1788 | * the error at all the call sites for memory_object_map(). | |
1789 | * The risk is in having to undo the map/object/entry state at | |
1790 | * all these call sites. It would also affect more than just mmap() | |
1791 | * e.g. vm_remap(). | |
1792 | * | |
1793 | * if (error != EPERM) | |
1794 | * error = 0; | |
1795 | */ | |
1796 | ||
1797 | error = 0; | |
1c79356b | 1798 | |
2d21ac55 | 1799 | vnode_lock_spin(vp); |
1c79356b | 1800 | |
2d21ac55 | 1801 | if (error == 0) { |
91447636 A |
1802 | if ( !ISSET(uip->ui_flags, UI_ISMAPPED)) |
1803 | need_ref = 1; | |
1804 | SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED)); | |
22ba694c A |
1805 | if (flags & PROT_WRITE) { |
1806 | SET(uip->ui_flags, UI_MAPPEDWRITE); | |
1807 | } | |
2d21ac55 A |
1808 | } |
1809 | CLR(uip->ui_flags, UI_MAPBUSY); | |
55e303ae | 1810 | |
2d21ac55 A |
1811 | if (ISSET(uip->ui_flags, UI_MAPWAITING)) { |
1812 | CLR(uip->ui_flags, UI_MAPWAITING); | |
1813 | need_wakeup = 1; | |
55e303ae | 1814 | } |
2d21ac55 | 1815 | vnode_unlock(vp); |
b4c24cb9 | 1816 | |
2d21ac55 A |
1817 | if (need_wakeup) |
1818 | wakeup(&uip->ui_flags); | |
1819 | ||
39037602 A |
1820 | if (need_ref) { |
1821 | /* | |
1822 | * Make sure we get a ref as we can't unwind from here | |
1823 | */ | |
1824 | if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) | |
1825 | panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__); | |
1826 | } | |
2d21ac55 | 1827 | } |
91447636 | 1828 | return (error); |
0b4e3aa0 A |
1829 | } |
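/*
 * Minimal sketch (exposition only) of how the callers listed in the
 * block comment above treat ubc_map(): the return value is ignored and
 * any later fault is resolved in the fault path, so the call is purely
 * a performance hint.  `vp' and the protection bits are assumptions for
 * the example.
 *
 *	(void) ubc_map(vp, PROT_READ | PROT_EXEC);
 */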
1830 | ||
2d21ac55 | 1831 | |
0b4e3aa0 | 1832 | /* |
2d21ac55 A |
1833 | * ubc_destroy_named |
1834 | * | |
1835 | * Destroy the named memory object associated with the ubc_info control object | |
1836 | * associated with the designated vnode, if there is a ubc_info associated | |
1837 | * with the vnode, and a control object is associated with it | |
1838 | * | |
1839 | * Parameters: vp The designated vnode | |
1840 | * | |
1841 | * Returns: (void) | |
1842 | * | |
1843 | * Notes: This function is called on vnode termination for all vnodes, | |
1844 | * and must therefore not assume that there is a ubc_info that is | |
1845 | * associated with the vnode, nor that there is a control object | |
1846 | * associated with the ubc_info. | |
1847 | * | |
1848 | * If all the conditions necessary are present, this function | |
1849 | * calls memory_object_destroy(), which will in turn end up | |
1850 | * calling ubc_unmap() to release any vnode references that were | |
1851 | * established via ubc_map(). | |
1852 | * | |
1853 | * IMPORTANT: This is an internal use function that is used | |
1854 | * exclusively by the internal use function vclean(). | |
0b4e3aa0 | 1855 | */ |
2d21ac55 A |
1856 | __private_extern__ void |
1857 | ubc_destroy_named(vnode_t vp) | |
0b4e3aa0 A |
1858 | { |
1859 | memory_object_control_t control; | |
0b4e3aa0 A |
1860 | struct ubc_info *uip; |
1861 | kern_return_t kret; | |
1862 | ||
2d21ac55 A |
1863 | if (UBCINFOEXISTS(vp)) { |
1864 | uip = vp->v_ubcinfo; | |
1865 | ||
1866 | /* Terminate the memory object */ | |
1867 | control = ubc_getobject(vp, UBC_HOLDOBJECT); | |
1868 | if (control != MEMORY_OBJECT_CONTROL_NULL) { | |
1869 | kret = memory_object_destroy(control, 0); | |
1870 | if (kret != KERN_SUCCESS) | |
1871 | panic("ubc_destroy_named: memory_object_destroy failed"); | |
0b4e3aa0 A |
1872 | } |
1873 | } | |
1c79356b A |
1874 | } |
1875 | ||
0b4e3aa0 | 1876 | |
1c79356b | 1877 | /* |
2d21ac55 A |
1878 | * ubc_isinuse |
1879 | * | |
1880 | * Determine whether or not a vnode is currently in use by ubc at a level in | |
1881 | * excess of the requested busycount | |
1882 | * | |
1883 | * Parameters: vp The vnode to check | |
1884 | * busycount The threshold busy count, used to bias | |
1885 | * the count usually already held by the | |
1886 | * caller to avoid races | |
1887 | * | |
1888 | * Returns: 1 The vnode is in use over the threshold | |
1889 | * 0 The vnode is not in use over the | |
1890 | * threshold | |
1891 | * | |
1892 | * Notes: Because the vnode is only held locked while actually checking | |
1893 | * the use count, this function only represents a snapshot of the | |
1894 | * current state of the vnode. If more accurate information is | |
1895 | * required, an additional busycount should be held by the caller | |
1896 | * and a non-zero busycount used. | |
1897 | * | |
1898 | * If there is no ubc_info associated with the vnode, this | |
1899 | * function will report that the vnode is not in use by ubc. | |
1c79356b A |
1900 | */ |
1901 | int | |
91447636 | 1902 | ubc_isinuse(struct vnode *vp, int busycount) |
1c79356b | 1903 | { |
91447636 | 1904 | if ( !UBCINFOEXISTS(vp)) |
0b4e3aa0 | 1905 | return (0); |
91447636 | 1906 | return(ubc_isinuse_locked(vp, busycount, 0)); |
1c79356b A |
1907 | } |
1908 | ||
91447636 | 1909 | |
2d21ac55 A |
1910 | /* |
1911 | * ubc_isinuse_locked | |
1912 | * | |
1913 | * Determine whether or not a vnode is currently in use by ubc at a level in | |
1914 | * excess of the requested busycount | |
1915 | * | |
1916 | * Parameters: vp The vnode to check | |
1917 | * busycount The threshold busy count, used to bias | |
1918 | * the count usually already held by the | |
1919 | * caller to avoid races | |
1920 | * locked True if the vnode is already locked by | |
1921 | * the caller | |
1922 | * | |
1923 | * Returns: 1 The vnode is in use over the threshold | |
1924 | * 0 The vnode is not in use over the | |
1925 | * threshold | |
1926 | * | |
1927 | * Notes: If the vnode is not locked on entry, it is locked while | |
1928 | * actually checking the use count. If this is the case, this | |
1929 | * function only represents a snapshot of the current state of | |
1930 | * the vnode. If more accurate information is required, the | |
1931 | * vnode lock should be held by the caller, otherwise an | |
1932 | * additional busycount should be held by the caller and a | |
1933 | * non-zero busycount used. | |
1934 | * | |
1935 | * If there is no ubc_info associated with the vnode, this | |
1936 | * function will report that the vnode is not in use by ubc. | |
1937 | */ | |
1c79356b | 1938 | int |
91447636 | 1939 | ubc_isinuse_locked(struct vnode *vp, int busycount, int locked) |
1c79356b | 1940 | { |
91447636 | 1941 | int retval = 0; |
1c79356b | 1942 | |
9bccf70c | 1943 | |
91447636 | 1944 | if (!locked) |
b0d623f7 | 1945 | vnode_lock_spin(vp); |
1c79356b | 1946 | |
91447636 A |
1947 | if ((vp->v_usecount - vp->v_kusecount) > busycount) |
1948 | retval = 1; | |
1949 | ||
1950 | if (!locked) | |
1951 | vnode_unlock(vp); | |
1952 | return (retval); | |
1c79356b A |
1953 | } |
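/*
 * Illustrative sketch (exposition only): a caller that already holds one
 * usecount on `vp' biases the threshold by that reference, per the Notes
 * above, to ask whether anyone else is using the file through UBC.
 * `vp' is an assumption for the example.
 *
 *	(busy beyond our own reference?)
 *	if (ubc_isinuse(vp, 1))
 *		return (EBUSY);
 */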
1954 | ||
91447636 | 1955 | |
1c79356b | 1956 | /* |
2d21ac55 A |
1957 | * ubc_unmap |
1958 | * | |
1959 | * Reverse the effects of a ubc_map() call for a given vnode | |
1960 | * | |
1961 | * Parameters: vp vnode to unmap from ubc | |
1962 | * | |
1963 | * Returns: (void) | |
1964 | * | |
1965 | * Notes: This is an internal use function used by vnode_pager_unmap(). | |
1966 | * It will attempt to obtain a reference on the supplied vnode, | |
1967 | * and if it can do so, and there is an associated ubc_info, and | |
1968 | * the flags indicate that it was mapped via ubc_map(), then the | |
1969 | * flag is cleared, the mapping removed, and the reference taken | |
1970 | * by ubc_map() is released. | |
1971 | * | |
1972 | * IMPORTANT: This MUST only be called by the VM | |
1973 | * to prevent race conditions. | |
1c79356b | 1974 | */ |
0b4e3aa0 | 1975 | __private_extern__ void |
1c79356b A |
1976 | ubc_unmap(struct vnode *vp) |
1977 | { | |
1978 | struct ubc_info *uip; | |
91447636 | 1979 | int need_rele = 0; |
2d21ac55 | 1980 | int need_wakeup = 0; |
b0d623f7 | 1981 | |
91447636 A |
1982 | if (vnode_getwithref(vp)) |
1983 | return; | |
1c79356b | 1984 | |
91447636 | 1985 | if (UBCINFOEXISTS(vp)) { |
fe8ab488 A |
1986 | bool want_fsevent = false; |
1987 | ||
91447636 | 1988 | vnode_lock(vp); |
91447636 | 1989 | uip = vp->v_ubcinfo; |
2d21ac55 A |
1990 | |
1991 | while (ISSET(uip->ui_flags, UI_MAPBUSY)) { | |
1992 | SET(uip->ui_flags, UI_MAPWAITING); | |
1993 | (void) msleep(&uip->ui_flags, &vp->v_lock, | |
1994 | PRIBIO, "ubc_unmap", NULL); | |
1995 | } | |
1996 | SET(uip->ui_flags, UI_MAPBUSY); | |
1997 | ||
91447636 | 1998 | if (ISSET(uip->ui_flags, UI_ISMAPPED)) { |
fe8ab488 A |
1999 | if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) |
2000 | want_fsevent = true; | |
2001 | ||
91447636 | 2002 | need_rele = 1; |
fe8ab488 A |
2003 | |
2004 | /* | |
2005 | * We want to clear the mapped flags after we've called | |
2006 | * VNOP_MNOMAP to avoid certain races and allow | |
2007 | * VNOP_MNOMAP to call ubc_is_mapped_writable. | |
2008 | */ | |
91447636 A |
2009 | } |
2010 | vnode_unlock(vp); | |
fe8ab488 | 2011 | |
91447636 | 2012 | if (need_rele) { |
fe8ab488 A |
2013 | vfs_context_t ctx = vfs_context_current(); |
2014 | ||
2015 | (void)VNOP_MNOMAP(vp, ctx); | |
2016 | ||
2017 | #if CONFIG_FSE | |
2018 | /* | |
2019 | * Why do we want an fsevent here? Normally the | |
2020 | * content modified fsevent is posted when a file is | |
2021 | * closed and only if it's written to via conventional | |
2022 | * means. It's perfectly legal to close a file and | |
2023 | * keep your mappings and we don't currently track | |
2024 | * whether it was written to via a mapping. | |
2025 | * Therefore, we need to post an fsevent here if the | |
2026 | * file was mapped writable. This may result in false | |
2027 | * events, i.e. we post a notification when nothing | |
2028 | * has really changed. | |
2029 | */ | |
2030 | if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) { | |
2031 | add_fsevent(FSE_CONTENT_MODIFIED, ctx, | |
2032 | FSE_ARG_VNODE, vp, | |
2033 | FSE_ARG_DONE); | |
2034 | } | |
2035 | #endif | |
2036 | ||
b0d623f7 | 2037 | vnode_rele(vp); |
91447636 | 2038 | } |
2d21ac55 A |
2039 | |
2040 | vnode_lock_spin(vp); | |
2041 | ||
fe8ab488 A |
2042 | if (need_rele) |
2043 | CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE); | |
2044 | ||
2d21ac55 | 2045 | CLR(uip->ui_flags, UI_MAPBUSY); |
fe8ab488 | 2046 | |
2d21ac55 A |
2047 | if (ISSET(uip->ui_flags, UI_MAPWAITING)) { |
2048 | CLR(uip->ui_flags, UI_MAPWAITING); | |
2049 | need_wakeup = 1; | |
2050 | } | |
2051 | vnode_unlock(vp); | |
2052 | ||
2053 | if (need_wakeup) | |
b0d623f7 | 2054 | wakeup(&uip->ui_flags); |
2d21ac55 | 2055 | |
91447636 A |
2056 | } |
2057 | /* | |
2058 | * the drop of the vnode ref will cleanup | |
2059 | */ | |
2060 | vnode_put(vp); | |
0b4e3aa0 A |
2061 | } |
2062 | ||
2d21ac55 A |
2063 | |
2064 | /* | |
2065 | * ubc_page_op | |
2066 | * | |
2067 | * Manipulate individual page state for a vnode with an associated ubc_info | |
2068 | * with an associated memory object control. | |
2069 | * | |
2070 | * Parameters: vp The vnode backing the page | |
2071 | * f_offset A file offset interior to the page | |
2072 | * ops The operations to perform, as a bitmap | |
2073 | * (see below for more information) | |
2074 | * phys_entryp The address of a ppnum_t; may be NULL | |
2075 | * to ignore | |
2076 | * flagsp A pointer to an int to contain flags; | |
2077 | * may be NULL to ignore | |
2078 | * | |
2079 | * Returns: KERN_SUCCESS Success | |
2080 | * KERN_INVALID_ARGUMENT If the memory object control has no VM | |
2081 | * object associated | |
2082 | * KERN_INVALID_OBJECT If UPL_POP_PHYSICAL and the object is | |
2083 | * not physically contiguous | |
2084 | * KERN_INVALID_OBJECT If !UPL_POP_PHYSICAL and the object is | |
2085 | * physically contiguous | |
2086 | * KERN_FAILURE If the page cannot be looked up | |
2087 | * | |
2088 | * Implicit Returns: | |
2089 | * *phys_entryp (modified) If phys_entryp is non-NULL and | |
2090 | * UPL_POP_PHYSICAL | |
2091 | * *flagsp (modified) If flagsp is non-NULL and there was | |
2092 | * !UPL_POP_PHYSICAL and a KERN_SUCCESS | |
2093 | * | |
2094 | * Notes: For object boundaries, it is considerably more efficient to | |
2095 | * ensure that f_offset is in fact on a page boundary, as this | |
2096 | * will avoid internal use of the hash table to identify the | |
2097 | * page, and would therefore skip a number of early optimizations. | |
2098 | * Since this is a page operation anyway, the caller should try | |
2099 | * to pass only a page aligned offset because of this. | |
2100 | * | |
2101 | * *flagsp may be modified even if this function fails. If it is | |
2102 | * modified, it will contain the condition of the page before the | |
2103 | * requested operation was attempted; these will only include the | |
2104 | * bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP, | |
2105 | * UPL_POP_SET, or UPL_POP_CLR bits. | |
2106 | * | |
2107 | * The flags field may contain a specific operation, such as | |
2108 | * UPL_POP_PHYSICAL or UPL_POP_DUMP: | |
2109 | * | |
2110 | * o UPL_POP_PHYSICAL Fail if not contiguous; if | |
2111 | * *phys_entryp and successful, set | |
2112 | * *phys_entryp | |
2113 | * o UPL_POP_DUMP Dump the specified page | |
2114 | * | |
2115 | * Otherwise, it is treated as a bitmap of one or more page | |
2116 | * operations to perform on the final memory object; allowable | |
2117 | * bit values are: | |
2118 | * | |
2119 | * o UPL_POP_DIRTY The page is dirty | |
2120 | * o UPL_POP_PAGEOUT The page is paged out | |
2121 | * o UPL_POP_PRECIOUS The page is precious | |
2122 | * o UPL_POP_ABSENT The page is absent | |
2123 | * o UPL_POP_BUSY The page is busy | |
2124 | * | |
2125 | * If the page status is only being queried and not modified, then | |
2126 | * no other bits should be specified. However, if it is being | |
2127 | * modified, exactly ONE of the following bits should be set: | |
2128 | * | |
2129 | * o UPL_POP_SET Set the current bitmap bits | |
2130 | * o UPL_POP_CLR Clear the current bitmap bits | |
2131 | * | |
2132 | * Thus to effect a combination of setting and clearing, it may be | |
2133 | * necessary to call this function twice. If this is done, the | |
2134 | * set should be used before the clear, since clearing may trigger | |
2135 | * a wakeup on the destination page, and if the page is backed by | |
2136 | * an encrypted swap file, setting will trigger the decryption | |
2137 | * needed before the wakeup occurs. | |
2138 | */ | |
0b4e3aa0 A |
2139 | kern_return_t |
2140 | ubc_page_op( | |
2141 | struct vnode *vp, | |
2142 | off_t f_offset, | |
2143 | int ops, | |
55e303ae | 2144 | ppnum_t *phys_entryp, |
0b4e3aa0 A |
2145 | int *flagsp) |
2146 | { | |
2147 | memory_object_control_t control; | |
2148 | ||
2149 | control = ubc_getobject(vp, UBC_FLAGS_NONE); | |
2150 | if (control == MEMORY_OBJECT_CONTROL_NULL) | |
2151 | return KERN_INVALID_ARGUMENT; | |
2152 | ||
2153 | return (memory_object_page_op(control, | |
2154 | (memory_object_offset_t)f_offset, | |
2155 | ops, | |
2156 | phys_entryp, | |
2157 | flagsp)); | |
2158 | } | |
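/*
 * Illustrative query sketch (exposition only).  Assumption: passing
 * ops == 0 performs a pure status query, returning the page's state in
 * *flagsp without setting or clearing anything, per the Notes above
 * (no UPL_POP_SET/UPL_POP_CLR and a page-aligned offset for efficiency).
 * `vp' and `f_offset' are assumptions for the example.
 *
 *	int pg_flags = 0;
 *	off_t pg_base = f_offset & ~((off_t)PAGE_MASK_64);
 *
 *	if (ubc_page_op(vp, pg_base, 0, NULL, &pg_flags) == KERN_SUCCESS &&
 *	    (pg_flags & UPL_POP_DIRTY))
 *		printf("page at %lld is dirty\n", pg_base);
 */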
2d21ac55 A |
2159 | |
2160 | ||
2161 | /* | |
2162 | * ubc_range_op | |
2163 | * | |
2164 | * Manipulate page state for a range of memory for a vnode with an associated | |
2165 | * ubc_info with an associated memory object control, when page level state is | |
2166 | * not required to be returned from the call (i.e. there are no phys_entryp or | |
2167 | * flagsp parameters to this call, and it takes a range which may contain | |
2168 | * multiple pages, rather than an offset interior to a single page). | |
2169 | * | |
2170 | * Parameters: vp The vnode backing the page | |
2171 | * f_offset_beg A file offset interior to the start page | |
2172 | * f_offset_end A file offset interior to the end page | |
2173 | * ops The operations to perform, as a bitmap | |
2174 | * (see below for more information) | |
2175 | * range The address of an int; may be NULL to | |
2176 | * ignore | |
2177 | * | |
2178 | * Returns: KERN_SUCCESS Success | |
2179 | * KERN_INVALID_ARGUMENT If the memory object control has no VM | |
2180 | * object associated | |
2181 | * KERN_INVALID_OBJECT If the object is physically contiguous | |
2182 | * | |
2183 | * Implicit Returns: | |
2184 | * *range (modified) If range is non-NULL, its contents will | |
2185 | * be modified to contain the number of | |
2186 | * bytes successfully operated upon. | |
2187 | * | |
2188 | * Notes: IMPORTANT: This function cannot be used on a range that | |
2189 | * consists of physically contiguous pages. | |
2190 | * | |
2191 | * For object boundaries, it is considerably more efficient to | |
2192 | * ensure that f_offset_beg and f_offset_end are in fact on page | |
2193 | * boundaries, as this will avoid internal use of the hash table | |
2194 | * to identify the page, and would therefore skip a number of | |
2195 | * early optimizations. Since this is an operation on a set of | |
2196 | * pages anyway, the caller should try to pass only page aligned | |
2197 | * offsets because of this. | |
2198 | * | |
2199 | * *range will be modified only if this function succeeds. | |
2200 | * | |
2201 | * The flags field MUST contain a specific operation; allowable | |
2202 | * values are: | |
2203 | * | |
2204 | * o UPL_ROP_ABSENT Returns the extent of the range | |
2205 | * presented which is absent, starting | |
2206 | * with the start address presented | |
2207 | * | |
2208 | * o UPL_ROP_PRESENT Returns the extent of the range | |
2209 | * presented which is present (resident), | |
2210 | * starting with the start address | |
2211 | * presented | |
2212 | * o UPL_ROP_DUMP Dump the pages which are found in the | |
2213 | * target object for the target range. | |
2214 | * | |
2215 | * IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are | |
2216 | * multiple regions in the range, only the first matching region | |
2217 | * is returned. | |
2218 | */ | |
55e303ae A |
2219 | kern_return_t |
2220 | ubc_range_op( | |
2221 | struct vnode *vp, | |
2222 | off_t f_offset_beg, | |
2223 | off_t f_offset_end, | |
2224 | int ops, | |
2225 | int *range) | |
2226 | { | |
2227 | memory_object_control_t control; | |
2228 | ||
2229 | control = ubc_getobject(vp, UBC_FLAGS_NONE); | |
2230 | if (control == MEMORY_OBJECT_CONTROL_NULL) | |
2231 | return KERN_INVALID_ARGUMENT; | |
2232 | ||
2233 | return (memory_object_range_op(control, | |
2234 | (memory_object_offset_t)f_offset_beg, | |
2235 | (memory_object_offset_t)f_offset_end, | |
2236 | ops, | |
2237 | range)); | |
2238 | } | |
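/*
 * Illustrative sketch (exposition only): measure how many bytes at the
 * start of a page-aligned range are absent from the cache, using
 * UPL_ROP_ABSENT as described above.  `vp', `start' and `len' are
 * assumptions for the example.
 *
 *	int absent_bytes = 0;
 *
 *	if (ubc_range_op(vp, start, start + len, UPL_ROP_ABSENT,
 *	    &absent_bytes) == KERN_SUCCESS)
 *		printf("leading %d bytes of the range are absent\n",
 *		    absent_bytes);
 */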
2d21ac55 A |
2239 | |
2240 | ||
2241 | /* | |
2242 | * ubc_create_upl | |
2243 | * | |
2244 | * Given a vnode, cause the population of a portion of the vm_object; based on | |
2245 | * the nature of the request, the pages returned may contain valid data, or | |
2246 | * they may be uninitialized. | |
2247 | * | |
2248 | * Parameters: vp The vnode from which to create the upl | |
2249 | * f_offset The start offset into the backing store | |
2250 | * represented by the vnode | |
2251 | * bufsize The size of the upl to create | |
2252 | * uplp Pointer to the upl_t to receive the | |
2253 | * created upl; MUST NOT be NULL | |
2254 | * plp Pointer to receive the internal page | |
2255 | * list for the created upl; MAY be NULL | |
2256 | * to ignore | |
2257 | * | |
2258 | * Returns: KERN_SUCCESS The requested upl has been created | |
2259 | * KERN_INVALID_ARGUMENT The bufsize argument is not an even | |
2260 | * multiple of the page size | |
2261 | * KERN_INVALID_ARGUMENT There is no ubc_info associated with | |
2262 | * the vnode, or there is no memory object | |
2263 | * control associated with the ubc_info | |
2264 | * memory_object_upl_request:KERN_INVALID_VALUE | |
2265 | * The supplied upl_flags argument is | |
2266 | * invalid | |
2267 | * Implicit Returns: | |
2268 | * *uplp (modified) | |
2269 | * *plp (modified) If non-NULL, the value of *plp will be | |
2270 | * modified to point to the internal page | |
2271 | * list; this modification may occur even | |
2272 | * if this function is unsuccessful, in | |
2273 | * which case the contents may be invalid | |
2274 | * | |
2275 | * Note: If successful, the returned *uplp MUST subsequently be freed | |
2276 | * via a call to ubc_upl_commit(), ubc_upl_commit_range(), | |
2277 | * ubc_upl_abort(), or ubc_upl_abort_range(). | |
2278 | */ | |
0b4e3aa0 A |
2279 | kern_return_t |
2280 | ubc_create_upl( | |
2281 | struct vnode *vp, | |
2d21ac55 | 2282 | off_t f_offset, |
b0d623f7 | 2283 | int bufsize, |
2d21ac55 | 2284 | upl_t *uplp, |
0b4e3aa0 | 2285 | upl_page_info_t **plp, |
2d21ac55 | 2286 | int uplflags) |
0b4e3aa0 A |
2287 | { |
2288 | memory_object_control_t control; | |
55e303ae | 2289 | kern_return_t kr; |
b0d623f7 A |
2290 | |
2291 | if (plp != NULL) | |
2292 | *plp = NULL; | |
2293 | *uplp = NULL; | |
0b4e3aa0 A |
2294 | |
2295 | if (bufsize & 0xfff) | |
2296 | return KERN_INVALID_ARGUMENT; | |
2297 | ||
fe8ab488 | 2298 | if (bufsize > MAX_UPL_SIZE_BYTES) |
6d2010ae A |
2299 | return KERN_INVALID_ARGUMENT; |
2300 | ||
b0d623f7 A |
2301 | if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) { |
2302 | ||
2303 | if (uplflags & UPL_UBC_MSYNC) { | |
2304 | uplflags &= UPL_RET_ONLY_DIRTY; | |
2305 | ||
2306 | uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE | | |
2307 | UPL_SET_INTERNAL | UPL_SET_LITE; | |
2308 | ||
2309 | } else if (uplflags & UPL_UBC_PAGEOUT) { | |
2310 | uplflags &= UPL_RET_ONLY_DIRTY; | |
2311 | ||
2312 | if (uplflags & UPL_RET_ONLY_DIRTY) | |
2313 | uplflags |= UPL_NOBLOCK; | |
2314 | ||
2315 | uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE | | |
2316 | UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE; | |
2317 | } else { | |
316670eb | 2318 | uplflags |= UPL_RET_ONLY_ABSENT | |
b0d623f7 A |
2319 | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | |
2320 | UPL_SET_INTERNAL | UPL_SET_LITE; | |
316670eb A |
2321 | |
2322 | /* | |
2323 | * if the requested size == PAGE_SIZE, we don't want to set | |
2324 | * the UPL_NOBLOCK since we may be trying to recover from a | |
2325 | * previous partial pagein I/O that occurred because we were low | |
2326 | * on memory and bailed early in order to honor the UPL_NOBLOCK... | |
2327 | * since we're only asking for a single page, we can block w/o fear | |
2328 | * of tying up pages while waiting for more to become available | |
2329 | */ | |
2330 | if (bufsize > PAGE_SIZE) | |
2331 | uplflags |= UPL_NOBLOCK; | |
b0d623f7 A |
2332 | } |
2333 | } else { | |
55e303ae | 2334 | uplflags &= ~UPL_FOR_PAGEOUT; |
55e303ae | 2335 | |
b0d623f7 A |
2336 | if (uplflags & UPL_WILL_BE_DUMPED) { |
2337 | uplflags &= ~UPL_WILL_BE_DUMPED; | |
2338 | uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL); | |
2339 | } else | |
2340 | uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL); | |
2341 | } | |
2342 | control = ubc_getobject(vp, UBC_FLAGS_NONE); | |
0b4e3aa0 A |
2343 | if (control == MEMORY_OBJECT_CONTROL_NULL) |
2344 | return KERN_INVALID_ARGUMENT; | |
2345 | ||
b0d623f7 A |
2346 | kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags); |
2347 | if (kr == KERN_SUCCESS && plp != NULL) | |
2348 | *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp); | |
0b4e3aa0 A |
2349 | return kr; |
2350 | } | |
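/*
 * Illustrative pagein-style sketch (exposition only; not the actual
 * cluster layer): create a single-page UPL on `vp', map it into the
 * kernel, fill it, then unmap and commit it.  Error handling is
 * abbreviated and the identifiers are assumptions for the example.
 *
 *	upl_t		 upl;
 *	upl_page_info_t	*pl;
 *	vm_offset_t	 kaddr;
 *
 *	if (ubc_create_upl(vp, f_offset, PAGE_SIZE, &upl, &pl,
 *	    UPL_UBC_PAGEIN) != KERN_SUCCESS)
 *		return (EIO);
 *
 *	if (ubc_upl_map(upl, &kaddr) != KERN_SUCCESS) {
 *		(void) ubc_upl_abort(upl, UPL_ABORT_ERROR);
 *		return (EIO);
 *	}
 *	(... copy the file data into the page at `kaddr' ...)
 *	(void) ubc_upl_unmap(upl);
 *	(void) ubc_upl_commit_range(upl, 0, PAGE_SIZE,
 *	    UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_CLEAR_DIRTY);
 */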
2d21ac55 A |
2351 | |
2352 | ||
2353 | /* | |
2354 | * ubc_upl_maxbufsize | |
2355 | * | |
2356 | * Return the maximum bufsize ubc_create_upl( ) will take. | |
2357 | * | |
2358 | * Parameters: none | |
2359 | * | |
2360 | * Returns: maximum size buffer (in bytes) ubc_create_upl( ) will take. | |
2361 | */ | |
2362 | upl_size_t | |
2363 | ubc_upl_maxbufsize( | |
2364 | void) | |
2365 | { | |
fe8ab488 | 2366 | return(MAX_UPL_SIZE_BYTES); |
2d21ac55 | 2367 | } |
0b4e3aa0 | 2368 | |
2d21ac55 A |
2369 | /* |
2370 | * ubc_upl_map | |
2371 | * | |
2372 | * Map the page list associated with the supplied upl into the kernel virtual | |
2373 | * address space at the virtual address indicated by the dst_addr argument; | |
2374 | * the entire upl is mapped | |
2375 | * | |
2376 | * Parameters: upl The upl to map | |
2377 | * dst_addr The address at which to map the upl | |
2378 | * | |
2379 | * Returns: KERN_SUCCESS The upl has been mapped | |
2380 | * KERN_INVALID_ARGUMENT The upl is UPL_NULL | |
2381 | * KERN_FAILURE The upl is already mapped | |
2382 | * vm_map_enter:KERN_INVALID_ARGUMENT | |
2383 | * A failure code from vm_map_enter() due | |
2384 | * to an invalid argument | |
2385 | */ | |
0b4e3aa0 A |
2386 | kern_return_t |
2387 | ubc_upl_map( | |
2388 | upl_t upl, | |
2389 | vm_offset_t *dst_addr) | |
2390 | { | |
2391 | return (vm_upl_map(kernel_map, upl, dst_addr)); | |
2392 | } | |
2393 | ||
2394 | ||
2d21ac55 A |
2395 | /* |
2396 | * ubc_upl_unmap | |
2397 | * | |
2398 | * Unmap the page list associated with the supplied upl from the kernel virtual | |
2399 | * address space; the entire upl is unmapped. | |
2400 | * | |
2401 | * Parameters: upl The upl to unmap | |
2402 | * | |
2403 | * Returns: KERN_SUCCESS The upl has been unmapped | |
2404 | * KERN_FAILURE The upl is not currently mapped | |
2405 | * KERN_INVALID_ARGUMENT If the upl is UPL_NULL | |
2406 | */ | |
0b4e3aa0 A |
2407 | kern_return_t |
2408 | ubc_upl_unmap( | |
2409 | upl_t upl) | |
2410 | { | |
2411 | return(vm_upl_unmap(kernel_map, upl)); | |
2412 | } | |
2413 | ||
2d21ac55 A |
2414 | |
2415 | /* | |
2416 | * ubc_upl_commit | |
2417 | * | |
2418 | * Commit the contents of the upl to the backing store | |
2419 | * | |
2420 | * Parameters: upl The upl to commit | |
2421 | * | |
2422 | * Returns: KERN_SUCCESS The upl has been committed | |
2423 | * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL | |
2424 | * KERN_FAILURE The supplied upl does not represent | |
2425 | * device memory, and the offset plus the | |
2426 | * size would exceed the actual size of | |
2427 | * the upl | |
2428 | * | |
2429 | * Notes: In practice, the only return value for this function should be | |
2430 | * KERN_SUCCESS, unless there has been data structure corruption; | |
2431 | * since the upl is deallocated regardless of success or failure, | |
2432 | * there's really nothing to do about this other than panic. | |
2433 | * | |
2434 | * IMPORTANT: Use of this function should not be mixed with use of | |
2435 | * ubc_upl_commit_range(), due to the unconditional deallocation | |
2436 | * by this function. | |
2437 | */ | |
0b4e3aa0 A |
2438 | kern_return_t |
2439 | ubc_upl_commit( | |
2440 | upl_t upl) | |
2441 | { | |
2442 | upl_page_info_t *pl; | |
2443 | kern_return_t kr; | |
2444 | ||
2445 | pl = UPL_GET_INTERNAL_PAGE_LIST(upl); | |
fe8ab488 | 2446 | kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT); |
0b4e3aa0 A |
2447 | upl_deallocate(upl); |
2448 | return kr; | |
1c79356b A |
2449 | } |
2450 | ||
0b4e3aa0 | 2451 | |
2d21ac55 A |
2452 | /* |
2453 | * ubc_upl_commit_range | |
2454 | * | |
2455 | * Commit the contents of the specified range of the upl to the backing store | |
2456 | * | |
2457 | * Parameters: upl The upl to commit | |
2458 | * offset The offset into the upl | |
2459 | * size The size of the region to be committed, | |
2460 | * starting at the specified offset | |
2461 | * flags commit type (see below) | |
2462 | * | |
2463 | * Returns: KERN_SUCCESS The range has been committed | |
2464 | * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL | |
2465 | * KERN_FAILURE The supplied upl does not represent | |
2466 | * device memory, and the offset plus the | |
2467 | * size would exceed the actual size of | |
2468 | * the upl | |
2469 | * | |
2470 | * Notes: IMPORTANT: If the commit is successful, and the object is now | |
2471 | * empty, the upl will be deallocated. Since the caller cannot | |
2472 | * check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag | |
2473 | * should generally only be used when the offset is 0 and the size | |
2474 | * is equal to the upl size. | |
2475 | * | |
2476 | * The flags argument is a bitmap of flags on the range of pages in | |
2477 | * the upl to be committed; allowable flags are: | |
2478 | * | |
2479 | * o UPL_COMMIT_FREE_ON_EMPTY Free the upl when it is | |
2480 | * both empty and has been | |
2481 | * successfully committed | |
2482 | * o UPL_COMMIT_CLEAR_DIRTY Clear each page's dirty | |
2483 | * bit; will prevent a | |
2484 | * later pageout | |
2485 | * o UPL_COMMIT_SET_DIRTY Set each page's dirty | |
2486 | * bit; will cause a later | |
2487 | * pageout | |
2488 | * o UPL_COMMIT_INACTIVATE Clear each page's | |
2489 | * reference bit; the page | |
2490 | * will not be accessed | |
2491 | * o UPL_COMMIT_ALLOW_ACCESS Unbusy each page; pages | |
2492 | * become busy when an | |
2493 | * IOMemoryDescriptor is | |
2494 | * mapped or redirected, | |
2495 | * and we have to wait for | |
2496 | * an IOKit driver | |
2497 | * | |
2498 | * The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should | |
2499 | * not be specified by the caller. | |
2500 | * | |
2501 | * The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are | |
2502 | * mutually exclusive, and should not be combined. | |
2503 | */ | |
0b4e3aa0 A |
2504 | kern_return_t |
2505 | ubc_upl_commit_range( | |
2506 | upl_t upl, | |
b0d623f7 A |
2507 | upl_offset_t offset, |
2508 | upl_size_t size, | |
0b4e3aa0 A |
2509 | int flags) |
2510 | { | |
2511 | upl_page_info_t *pl; | |
2512 | boolean_t empty; | |
2513 | kern_return_t kr; | |
2514 | ||
2515 | if (flags & UPL_COMMIT_FREE_ON_EMPTY) | |
2516 | flags |= UPL_COMMIT_NOTIFY_EMPTY; | |
2517 | ||
593a1d5f A |
2518 | if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) { |
2519 | return KERN_INVALID_ARGUMENT; | |
2520 | } | |
2521 | ||
0b4e3aa0 A |
2522 | pl = UPL_GET_INTERNAL_PAGE_LIST(upl); |
2523 | ||
2524 | kr = upl_commit_range(upl, offset, size, flags, | |
fe8ab488 | 2525 | pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty); |
0b4e3aa0 A |
2526 | |
2527 | if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) | |
2528 | upl_deallocate(upl); | |
2529 | ||
2530 | return kr; | |
2531 | } | |
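/*
 * Minimal sketch (exposition only) of the common pattern implied by the
 * Notes above: commit the entire UPL in one call so that
 * UPL_COMMIT_FREE_ON_EMPTY can safely free it once it drains.  `upl' and
 * `upl_size' are assumptions for the example.
 *
 *	(void) ubc_upl_commit_range(upl, 0, upl_size,
 *	    UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_INACTIVATE);
 */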
2d21ac55 A |
2532 | |
2533 | ||
2534 | /* | |
2535 | * ubc_upl_abort_range | |
2536 | * | |
2537 | * Abort the contents of the specified range of the specified upl | |
2538 | * | |
2539 | * Parameters: upl The upl to abort | |
2540 | * offset The offset into the upl | |
2541 | * size The size of the region to be aborted, | |
2542 | * starting at the specified offset | |
2543 | * abort_flags abort type (see below) | |
2544 | * | |
2545 | * Returns: KERN_SUCCESS The range has been aborted | |
2546 | * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL | |
2547 | * KERN_FAILURE The supplied upl does not represent | |
2548 | * device memory, and the offset plus the | |
2549 | * size would exceed the actual size of | |
2550 | * the upl | |
2551 | * | |
2552 | * Notes: IMPORTANT: If the abort is successful, and the object is now | |
2553 | * empty, the upl will be deallocated. Since the caller cannot | |
2554 | * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag | |
2555 | * should generally only be used when the offset is 0 and the size | |
2556 | * is equal to the upl size. | |
2557 | * | |
2558 | * The abort_flags argument is a bitmap of flags on the range of | |
2559 | * pages in the upl to be aborted; allowable flags are: | |
2560 | * | |
2561 | * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both | |
2562 | * empty and has been successfully | |
2563 | * aborted | |
2564 | * o UPL_ABORT_RESTART The operation must be restarted | |
2565 | * o UPL_ABORT_UNAVAILABLE The pages are unavailable | |
2566 | * o UPL_ABORT_ERROR An I/O error occurred | |
2567 | * o UPL_ABORT_DUMP_PAGES Just free the pages | |
2568 | * o UPL_ABORT_NOTIFY_EMPTY RESERVED | |
2569 | * o UPL_ABORT_ALLOW_ACCESS RESERVED | |
2570 | * | |
2571 | * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should | |
2572 | * not be specified by the caller. It is intended to fulfill the | |
2573 | * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function | |
2574 | * ubc_upl_commit_range(), but is never referenced internally. | |
2575 | * | |
2576 | * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor | |
2577 | * referenced; do not use it. | |
2578 | */ | |
0b4e3aa0 A |
2579 | kern_return_t |
2580 | ubc_upl_abort_range( | |
2581 | upl_t upl, | |
b0d623f7 A |
2582 | upl_offset_t offset, |
2583 | upl_size_t size, | |
0b4e3aa0 A |
2584 | int abort_flags) |
2585 | { | |
2586 | kern_return_t kr; | |
2587 | boolean_t empty = FALSE; | |
2588 | ||
2589 | if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) | |
2590 | abort_flags |= UPL_ABORT_NOTIFY_EMPTY; | |
2591 | ||
2592 | kr = upl_abort_range(upl, offset, size, abort_flags, &empty); | |
2593 | ||
2594 | if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) | |
2595 | upl_deallocate(upl); | |
2596 | ||
2597 | return kr; | |
2598 | } | |
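/*
 * Minimal sketch (exposition only): after a failed I/O, throw away the
 * whole UPL, tagging the pages with the error and letting the UPL be
 * freed once empty, per the flag descriptions above.  `upl' and
 * `upl_size' are assumptions for the example.
 *
 *	(void) ubc_upl_abort_range(upl, 0, upl_size,
 *	    UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
 */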
2599 | ||
2d21ac55 A |
2600 | |
2601 | /* | |
2602 | * ubc_upl_abort | |
2603 | * | |
2604 | * Abort the contents of the specified upl | |
2605 | * | |
2606 | * Parameters: upl The upl to abort | |
2607 | * abort_type abort type (see below) | |
2608 | * | |
2609 | * Returns: KERN_SUCCESS The range has been aborted | |
2610 | * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL | |
2611 | * KERN_FAILURE The supplied upl does not represent | |
2612 | * device memory, and the offset plus the | |
2613 | * size would exceed the actual size of | |
2614 | * the upl | |
2615 | * | |
2616 | * Notes: IMPORTANT: If the abort is successful, and the object is now | |
2617 | * empty, the upl will be deallocated. Since the caller cannot | |
2618 | * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag | |
2619 | * should generally only be used when the offset is 0 and the size | |
2620 | * is equal to the upl size. | |
2621 | * | |
2622 | * The abort_type is a bitmap of flags on the range of | |
2623 | * pages in the upl to be aborted; allowable flags are: | |
2624 | * | |
2625 | * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both | |
2626 | * empty and has been successfully | |
2627 | * aborted | |
2628 | * o UPL_ABORT_RESTART The operation must be restarted | |
2629 | * o UPL_ABORT_UNAVAILABLE The pages are unavailable | |
2630 | * o UPL_ABORT_ERROR An I/O error occurred | |
2631 | * o UPL_ABORT_DUMP_PAGES Just free the pages | |
2632 | * o UPL_ABORT_NOTIFY_EMPTY RESERVED | |
2633 | * o UPL_ABORT_ALLOW_ACCESS RESERVED | |
2634 | * | |
2635 | * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should | |
2636 | * not be specified by the caller. It is intended to fulfill the | |
2637 | * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function | |
2638 | * ubc_upl_commit_range(), but is never referenced internally. | |
2639 | * | |
2640 | * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor | |
2641 | * referenced; do not use it. | |
2642 | */ | |
0b4e3aa0 A |
2643 | kern_return_t |
2644 | ubc_upl_abort( | |
2645 | upl_t upl, | |
2646 | int abort_type) | |
2647 | { | |
2648 | kern_return_t kr; | |
2649 | ||
2650 | kr = upl_abort(upl, abort_type); | |
2651 | upl_deallocate(upl); | |
2652 | return kr; | |
2653 | } | |
2654 | ||
2d21ac55 A |
2655 | |
2656 | /* | |
2657 | * ubc_upl_pageinfo | |
2658 | * | |
2659 | * Retrieve the internal page list for the specified upl | |
2660 | * | |
2661 | * Parameters: upl The upl to obtain the page list from | |
2662 | * | |
2663 | * Returns: !NULL The (upl_page_info_t *) for the page | |
2664 | * list internal to the upl | |
2665 | * NULL Error/no page list associated | |
2666 | * | |
2667 | * Notes: IMPORTANT: The function is only valid on internal objects | |
2668 | * where the list request was made with the UPL_INTERNAL flag. | |
2669 | * | |
2670 | * This function is a utility helper function, since some callers | |
2671 | * may not have direct access to the header defining the macro, | |
2672 | * due to abstraction layering constraints. | |
2673 | */ | |
0b4e3aa0 A |
2674 | upl_page_info_t * |
2675 | ubc_upl_pageinfo( | |
2676 | upl_t upl) | |
2677 | { | |
2678 | return (UPL_GET_INTERNAL_PAGE_LIST(upl)); | |
2679 | } | |
91447636 | 2680 | |
91447636 A |
2681 | |
2682 | int | |
fe8ab488 | 2683 | UBCINFOEXISTS(const struct vnode * vp) |
91447636 | 2684 | { |
2d21ac55 | 2685 | return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL)); |
91447636 A |
2686 | } |
2687 | ||
2d21ac55 | 2688 | |
316670eb A |
2689 | void |
2690 | ubc_upl_range_needed( | |
2691 | upl_t upl, | |
2692 | int index, | |
2693 | int count) | |
2694 | { | |
2695 | upl_range_needed(upl, index, count); | |
2696 | } | |
2697 | ||
fe8ab488 A |
2698 | boolean_t ubc_is_mapped(const struct vnode *vp, boolean_t *writable) |
2699 | { | |
2700 | if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) | |
2701 | return FALSE; | |
2702 | if (writable) | |
2703 | *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE); | |
2704 | return TRUE; | |
2705 | } | |
2706 | ||
2707 | boolean_t ubc_is_mapped_writable(const struct vnode *vp) | |
2708 | { | |
2709 | boolean_t writable; | |
2710 | return ubc_is_mapped(vp, &writable) && writable; | |
2711 | } | |
2712 | ||
316670eb | 2713 | |
2d21ac55 A |
2714 | /* |
2715 | * CODE SIGNING | |
2716 | */ | |
2d21ac55 A |
2717 | static volatile SInt32 cs_blob_size = 0; |
2718 | static volatile SInt32 cs_blob_count = 0; | |
2719 | static SInt32 cs_blob_size_peak = 0; | |
2720 | static UInt32 cs_blob_size_max = 0; | |
2721 | static SInt32 cs_blob_count_peak = 0; | |
2d21ac55 | 2722 | |
6d2010ae A |
2723 | SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs"); |
2724 | SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs"); | |
2725 | SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs"); | |
2726 | SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs"); | |
2727 | SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob"); | |
2d21ac55 | 2728 | |
3e170ce0 A |
2729 | /* |
2730 | * Function: csblob_parse_teamid | |
2731 | * | |
2732 | * Description: This function returns a pointer to the team id | |
2733 | stored within the codedirectory of the csblob. | |
2734 | If the codedirectory predates team-ids, it returns | |
2735 | NULL. | |
2736 | This does not copy the name but returns a pointer to | |
2737 | it within the CD. Subsequently, the CD must be | |
2738 | available when this is used. | |
2739 | */ | |
2740 | ||
2741 | static const char * | |
2742 | csblob_parse_teamid(struct cs_blob *csblob) | |
2743 | { | |
2744 | const CS_CodeDirectory *cd; | |
2745 | ||
490019cf | 2746 | cd = csblob->csb_cd; |
3e170ce0 A |
2747 | |
2748 | if (ntohl(cd->version) < CS_SUPPORTSTEAMID) | |
2749 | return NULL; | |
2750 | ||
2751 | if (cd->teamOffset == 0) | |
2752 | return NULL; | |
2753 | ||
2754 | const char *name = ((const char *)cd) + ntohl(cd->teamOffset); | |
2755 | if (cs_debug > 1) | |
2756 | printf("found team-id %s in cdblob\n", name); | |
2757 | ||
2758 | return name; | |
2759 | } | |
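/*
 * Illustrative sketch (exposition only): since the pointer returned above
 * aims into the blob's Code Directory, a caller that needs the team
 * identifier beyond the CD's lifetime should copy it out first.  `blob'
 * and the buffer size are assumptions for the example.
 *
 *	char teamid[64];	(size chosen arbitrarily for the example)
 *	const char *p = csblob_parse_teamid(blob);
 *
 *	if (p != NULL)
 *		strlcpy(teamid, p, sizeof(teamid));
 */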
2760 | ||
39236c6e | 2761 | |
593a1d5f A |
2762 | kern_return_t |
2763 | ubc_cs_blob_allocate( | |
2764 | vm_offset_t *blob_addr_p, | |
2765 | vm_size_t *blob_size_p) | |
2766 | { | |
2767 | kern_return_t kr; | |
2768 | ||
3e170ce0 | 2769 | *blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY); |
593a1d5f A |
2770 | if (*blob_addr_p == 0) { |
2771 | kr = KERN_NO_SPACE; | |
2772 | } else { | |
2773 | kr = KERN_SUCCESS; | |
2774 | } | |
593a1d5f A |
2775 | return kr; |
2776 | } | |
2777 | ||
2778 | void | |
2779 | ubc_cs_blob_deallocate( | |
2780 | vm_offset_t blob_addr, | |
2781 | vm_size_t blob_size) | |
2782 | { | |
593a1d5f | 2783 | kfree((void *) blob_addr, blob_size); |
39037602 A |
2784 | } |
2785 | ||
2786 | /* | |
2787 | * Some codesigned files use a lowest common denominator page size of | |
2788 | * 4KiB, but can be used on systems that have a runtime page size of | |
2789 | * 16KiB. Since faults will only occur on 16KiB ranges in | |
2790 | * cs_validate_range(), we can convert the original Code Directory to | |
2791 | * a multi-level scheme where groups of 4 hashes are combined to form | |
2792 | * a new hash, which represents 16KiB in the on-disk file. This can | |
2793 | * reduce the wired memory requirement for the Code Directory by | |
2794 | * 75%. Care must be taken for binaries that use the "fourk" VM pager | |
2795 | * for unaligned access, which may still attempt to validate on | |
2796 | * non-16KiB multiples for compatibility with 3rd party binaries. | |
2797 | */ | |
2798 | static boolean_t | |
2799 | ubc_cs_supports_multilevel_hash(struct cs_blob *blob) | |
2800 | { | |
2801 | const CS_CodeDirectory *cd; | |
2802 | ||
2803 | /* | |
2804 | * Only applies to binaries that ship as part of the OS, | |
2805 | * primarily the shared cache. | |
2806 | */ | |
2807 | if (!blob->csb_platform_binary || blob->csb_teamid != NULL) { | |
2808 | return FALSE; | |
2809 | } | |
2810 | ||
2811 | /* | |
2812 | * If the runtime page size matches the code signing page | |
2813 | * size, there is no work to do. | |
2814 | */ | |
2815 | if (PAGE_SHIFT <= blob->csb_hash_pageshift) { | |
2816 | return FALSE; | |
2817 | } | |
2818 | ||
2819 | cd = blob->csb_cd; | |
2820 | ||
2821 | /* | |
2822 | * There must be a valid integral multiple of hashes | |
2823 | */ | |
2824 | if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) { | |
2825 | return FALSE; | |
2826 | } | |
2827 | ||
2828 | /* | |
2829 | * Scatter lists must also have ranges that have an integral number of hashes | |
2830 | */ | |
2831 | if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { | |
2832 | ||
2833 | const SC_Scatter *scatter = (const SC_Scatter*) | |
2834 | ((const char*)cd + ntohl(cd->scatterOffset)); | |
2835 | /* iterate all scatter structs to make sure they are all aligned */ | |
2836 | do { | |
2837 | uint32_t sbase = ntohl(scatter->base); | |
2838 | uint32_t scount = ntohl(scatter->count); | |
2839 | ||
2840 | /* last scatter? */ | |
2841 | if (scount == 0) { | |
2842 | break; | |
2843 | } | |
2844 | ||
2845 | if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) { | |
2846 | return FALSE; | |
2847 | } | |
2848 | ||
2849 | if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) { | |
2850 | return FALSE; | |
2851 | } | |
2852 | ||
2853 | scatter++; | |
2854 | } while(1); | |
2855 | } | |
2856 | ||
2857 | /* Covered range must be a multiple of the new page size */ | |
2858 | if (ntohl(cd->codeLimit) & PAGE_MASK) { | |
2859 | return FALSE; | |
2860 | } | |
2861 | ||
2862 | /* All checks pass */ | |
2863 | return TRUE; | |
2864 | } | |
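/*
 * Worked example of the conversion the comment above motivates
 * (illustrative numbers only): with 4KiB code-signing pages
 * (csb_hash_pageshift == 12) on a 16KiB-page kernel (PAGE_SHIFT == 14),
 * the shift applied below is 14 - 12 = 2, so 2^2 = 4 adjacent per-page
 * hashes collapse into one new slot, nCodeSlots and every scatter
 * base/count shrink by a factor of 4, and the wired size of the hash
 * array drops by roughly 75%, as noted above.
 */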
2865 | ||
2866 | /* | |
2867 | * All state and preconditions were checked before, so this | |
2868 | * function cannot fail. | |
2869 | */ | |
2870 | static void | |
2871 | ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob) | |
2872 | { | |
2873 | const CS_CodeDirectory *old_cd, *cd; | |
2874 | CS_CodeDirectory *new_cd; | |
2875 | const CS_GenericBlob *entitlements; | |
2876 | vm_offset_t new_blob_addr; | |
2877 | vm_size_t new_blob_size; | |
2878 | vm_size_t new_cdsize; | |
2879 | kern_return_t kr; | |
2880 | int error; | |
2881 | ||
2882 | uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift); | |
2883 | ||
2884 | if (cs_debug > 1) { | |
2885 | printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n", | |
2886 | (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT); | |
2887 | } | |
2888 | ||
2889 | old_cd = blob->csb_cd; | |
2890 | ||
2891 | /* Up to the hashes, we can copy all data */ | |
2892 | new_cdsize = ntohl(old_cd->hashOffset); | |
2893 | new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize; | |
2894 | ||
2895 | new_blob_size = sizeof(CS_SuperBlob); | |
2896 | new_blob_size += sizeof(CS_BlobIndex); | |
2897 | new_blob_size += new_cdsize; | |
2898 | ||
2899 | if (blob->csb_entitlements_blob) { | |
2900 | /* We need to add a slot for the entitlements */ | |
2901 | new_blob_size += sizeof(CS_BlobIndex); | |
2902 | new_blob_size += ntohl(blob->csb_entitlements_blob->length); | |
2903 | } | |
2904 | ||
2905 | kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size); | |
2906 | if (kr != KERN_SUCCESS) { | |
2907 | if (cs_debug > 1) { | |
2908 | printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n", | |
2909 | kr); | |
2910 | } | |
2911 | return; | |
2912 | } | |
2913 | ||
2914 | CS_SuperBlob *new_superblob; | |
2915 | ||
2916 | new_superblob = (CS_SuperBlob *)new_blob_addr; | |
2917 | new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE); | |
2918 | new_superblob->length = htonl((uint32_t)new_blob_size); | |
2919 | if (blob->csb_entitlements_blob) { | |
2920 | vm_size_t ent_offset, cd_offset; | |
2921 | ||
2922 | cd_offset = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex); | |
2923 | ent_offset = cd_offset + new_cdsize; | |
2924 | ||
2925 | new_superblob->count = htonl(2); | |
2926 | new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY); | |
2927 | new_superblob->index[0].offset = htonl((uint32_t)cd_offset); | |
2928 | new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS); | |
2929 | new_superblob->index[1].offset = htonl((uint32_t)ent_offset); | |
2930 | ||
2931 | memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length)); | |
2932 | ||
2933 | new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset); | |
2934 | } else { | |
2935 | vm_size_t cd_offset; | |
2936 | ||
2937 | cd_offset = sizeof(CS_SuperBlob) + 1 * sizeof(CS_BlobIndex); | |
2938 | ||
2939 | new_superblob->count = htonl(1); | |
2940 | new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY); | |
2941 | new_superblob->index[0].offset = htonl((uint32_t)cd_offset); | |
2942 | ||
2943 | new_cd = (CS_CodeDirectory *)new_blob_addr; | |
2944 | } | |
2945 | ||
2946 | memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset)); | |
2947 | ||
2948 | /* Update fields in the Code Directory structure */ | |
2949 | new_cd->length = htonl((uint32_t)new_cdsize); | |
2950 | ||
2951 | uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots); | |
2952 | nCodeSlots >>= hashes_per_new_hash_shift; | |
2953 | new_cd->nCodeSlots = htonl(nCodeSlots); | |
2954 | ||
2955 | new_cd->pageSize = PAGE_SHIFT; /* pageSize is a single byte; no byte swap needed */ | |
2956 | ||
2957 | if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) { | |
2958 | SC_Scatter *scatter = (SC_Scatter*) | |
2959 | ((char *)new_cd + ntohl(new_cd->scatterOffset)); | |
2960 | /* iterate all scatter structs to scale their counts */ | |
2961 | do { | |
2962 | uint32_t scount = ntohl(scatter->count); | |
2963 | uint32_t sbase = ntohl(scatter->base); | |
2964 | ||
2965 | /* last scatter? */ | |
2966 | if (scount == 0) { | |
2967 | break; | |
2968 | } | |
2969 | ||
2970 | scount >>= hashes_per_new_hash_shift; | |
2971 | scatter->count = htonl(scount); | |
2972 | ||
2973 | sbase >>= hashes_per_new_hash_shift; | |
2974 | scatter->base = htonl(sbase); | |
2975 | ||
2976 | scatter++; | |
2977 | } while(1); | |
2978 | } | |
2979 | ||
2980 | /* For each group of hashes, hash them together */ | |
2981 | const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset); | |
2982 | unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset); | |
2983 | ||
2984 | uint32_t hash_index; | |
2985 | for (hash_index = 0; hash_index < nCodeSlots; hash_index++) { | |
2986 | union cs_hash_union mdctx; | |
2987 | ||
2988 | uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift; | |
2989 | const unsigned char *src = src_base + hash_index * source_hash_len; | |
2990 | unsigned char *dst = dst_base + hash_index * new_cd->hashSize; | |
2991 | ||
2992 | blob->csb_hashtype->cs_init(&mdctx); | |
2993 | blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len); | |
2994 | blob->csb_hashtype->cs_final(dst, &mdctx); | |
2995 | } | |
2996 | ||
2997 | error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements); | |
2998 | if (error) { | |
2999 | ||
3000 | if (cs_debug > 1) { | |
3001 | printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n", | |
3002 | error); | |
3003 | } | |
3004 | ||
3005 | ubc_cs_blob_deallocate(new_blob_addr, new_blob_size); | |
3006 | return; | |
3007 | } | |
3008 | ||
3009 | /* New Code Directory is ready for use, swap it out in the blob structure */ | |
3010 | ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size); | |
3011 | ||
3012 | blob->csb_mem_size = new_blob_size; | |
3013 | blob->csb_mem_kaddr = new_blob_addr; | |
3014 | blob->csb_cd = cd; | |
3015 | blob->csb_entitlements_blob = entitlements; | |
3016 | ||
3017 | /* The blob has some cached attributes of the Code Directory, so update those */ | |
3018 | ||
3019 | blob->csb_hash_firstlevel_pagesize = blob->csb_hash_pagesize; /* Save the original page size */ | |
3020 | ||
3021 | blob->csb_hash_pagesize = PAGE_SIZE; | |
3022 | blob->csb_hash_pagemask = PAGE_MASK; | |
3023 | blob->csb_hash_pageshift = PAGE_SHIFT; | |
3024 | blob->csb_end_offset = ntohl(cd->codeLimit); | |
3025 | if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { | |
3026 | const SC_Scatter *scatter = (const SC_Scatter*) | |
3027 | ((const char*)cd + ntohl(cd->scatterOffset)); | |
3028 | blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE; | |
3029 | } else { | |
3030 | blob->csb_start_offset = 0; | |
3031 | } | |
593a1d5f | 3032 | } |
39236c6e | 3033 | |
2d21ac55 A |
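/*
 * ubc_cs_blob_add():
 * Attach a code-signing blob to a vnode.  Ownership of the blob memory
 * (*addr) is transferred to this function, even on error.  The blob is
 * validated, checked against the MAC policy, compared against any blobs
 * already attached (overlapping but different blobs are rejected with
 * EALREADY), and then linked at the head of the vnode's cs_blobs list.
 * Adding a blob identical to one already attached frees the duplicate
 * and returns 0.
 */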
3034 | int |
3035 | ubc_cs_blob_add( | |
3036 | struct vnode *vp, | |
3037 | cpu_type_t cputype, | |
3038 | off_t base_offset, | |
39037602 | 3039 | vm_address_t *addr, |
c18c124e | 3040 | vm_size_t size, |
39037602 | 3041 | struct image_params *imgp, |
3e170ce0 A |
3042 | __unused int flags, |
3043 | struct cs_blob **ret_blob) | |
91447636 | 3044 | { |
2d21ac55 A |
3045 | kern_return_t kr; |
3046 | struct ubc_info *uip; | |
3047 | struct cs_blob *blob, *oblob; | |
3048 | int error; | |
2d21ac55 | 3049 | const CS_CodeDirectory *cd; |
39037602 | 3050 | const CS_GenericBlob *entitlements; |
2d21ac55 | 3051 | off_t blob_start_offset, blob_end_offset; |
3e170ce0 | 3052 | union cs_hash_union mdctx; |
15129b1c A |
3053 | boolean_t record_mtime; |
3054 | ||
3055 | record_mtime = FALSE; | |
3e170ce0 A |
3056 | if (ret_blob) |
3057 | *ret_blob = NULL; | |
2d21ac55 | 3058 | |
2d21ac55 A |
3059 | blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob)); |
3060 | if (blob == NULL) { | |
3061 | return ENOMEM; | |
3062 | } | |
3063 | ||
2d21ac55 A |
3064 | /* fill in the new blob */ |
3065 | blob->csb_cpu_type = cputype; | |
3066 | blob->csb_base_offset = base_offset; | |
3067 | blob->csb_mem_size = size; | |
3068 | blob->csb_mem_offset = 0; | |
39037602 | 3069 | blob->csb_mem_kaddr = *addr; |
39236c6e | 3070 | blob->csb_flags = 0; |
fe8ab488 | 3071 | blob->csb_platform_binary = 0; |
3e170ce0 | 3072 | blob->csb_platform_path = 0; |
fe8ab488 | 3073 | blob->csb_teamid = NULL; |
39037602 A |
3074 | blob->csb_entitlements_blob = NULL; |
3075 | blob->csb_entitlements = NULL; | |
2d21ac55 | 3076 | |
39037602 A |
3077 | /* Transfer ownership. Even on error, this function will deallocate */ |
3078 | *addr = 0; | |
3079 | ||
2d21ac55 A |
3080 | /* |
3081 | * Validate the blob's contents | |
3082 | */ | |
39236c6e | 3083 | |
39037602 | 3084 | error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr, size, &cd, &entitlements); |
39236c6e | 3085 | if (error) { |
4bd07ac2 A |
3086 | |
3087 | if (cs_debug) | |
39236c6e | 3088 | printf("CODESIGNING: csblob invalid: %d\n", error); |
4bd07ac2 A |
3089 | /* The vnode checker can't make the rest of this function succeed if csblob validation failed, so bail */ |
3090 | goto out; | |
3091 | ||
2d21ac55 | 3092 | } else { |
3e170ce0 A |
3093 | const unsigned char *md_base; |
3094 | uint8_t hash[CS_HASH_MAX_SIZE]; | |
3095 | int md_size; | |
3096 | ||
490019cf | 3097 | blob->csb_cd = cd; |
39037602 | 3098 | blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */ |
3e170ce0 A |
3099 | blob->csb_hashtype = cs_find_md(cd->hashType); |
3100 | if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) | |
3101 | panic("validated CodeDirectory but unsupported type"); | |
39037602 A |
3102 | |
3103 | blob->csb_hash_pageshift = cd->pageSize; | |
3104 | blob->csb_hash_pagesize = (1U << cd->pageSize); | |
3105 | blob->csb_hash_pagemask = blob->csb_hash_pagesize - 1; | |
3106 | blob->csb_hash_firstlevel_pagesize = 0; | |
39236c6e | 3107 | blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID; |
39037602 | 3108 | blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask)); |
39236c6e A |
3109 | if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { |
3110 | const SC_Scatter *scatter = (const SC_Scatter*) | |
b0d623f7 | 3111 | ((const char*)cd + ntohl(cd->scatterOffset)); |
39037602 | 3112 | blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize; |
b0d623f7 | 3113 | } else { |
3e170ce0 | 3114 | blob->csb_start_offset = 0; |
b0d623f7 | 3115 | } |
3e170ce0 A |
3116 | /* compute the blob's cdhash */ |
3117 | md_base = (const unsigned char *) cd; | |
3118 | md_size = ntohl(cd->length); | |
3119 | ||
3120 | blob->csb_hashtype->cs_init(&mdctx); | |
3121 | blob->csb_hashtype->cs_update(&mdctx, md_base, md_size); | |
3122 | blob->csb_hashtype->cs_final(hash, &mdctx); | |
3123 | ||
3124 | memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN); | |
2d21ac55 A |
3125 | } |
3126 | ||
593a1d5f A |
3127 | /* |
3128 | * Let the policy module check whether the blob's signature is accepted. | |
3129 | */ | |
3130 | #if CONFIG_MACF | |
39037602 A |
3131 | unsigned int cs_flags = blob->csb_flags; |
3132 | error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, flags); | |
3133 | blob->csb_flags = cs_flags; | |
3134 | ||
fe8ab488 A |
3135 | if (error) { |
3136 | if (cs_debug) | |
3137 | printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error); | |
593a1d5f | 3138 | goto out; |
fe8ab488 | 3139 | } |
39037602 | 3140 | if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) { |
c18c124e A |
3141 | if (cs_debug) |
3142 | printf("check_signature[pid: %d], is not Apple signed\n", current_proc()->p_pid); | |
3143 | error = EPERM; | |
3144 | goto out; | |
3145 | } | |
593a1d5f A |
3146 | #endif |
3147 | ||
39037602 | 3148 | if (blob->csb_flags & CS_PLATFORM_BINARY) { |
fe8ab488 A |
3149 | if (cs_debug > 1) |
3150 | printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid); | |
3151 | blob->csb_platform_binary = 1; | |
39037602 | 3152 | blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH); |
fe8ab488 A |
3153 | } else { |
3154 | blob->csb_platform_binary = 0; | |
3e170ce0 A |
3155 | blob->csb_platform_path = 0; |
3156 | blob->csb_teamid = csblob_parse_teamid(blob); | |
fe8ab488 A |
3157 | if (cs_debug > 1) { |
3158 | if (blob->csb_teamid) | |
3159 | printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid); | |
3160 | else | |
3161 | printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid); | |
3162 | } | |
3163 | } | |
39037602 | 3164 | |
2d21ac55 A |
3165 | /* |
3166 | * Validate the blob's coverage | |
3167 | */ | |
3168 | blob_start_offset = blob->csb_base_offset + blob->csb_start_offset; | |
3169 | blob_end_offset = blob->csb_base_offset + blob->csb_end_offset; | |
3170 | ||
cf7d32b8 A |
3171 | if (blob_start_offset >= blob_end_offset || |
3172 | blob_start_offset < 0 || | |
3173 | blob_end_offset <= 0) { | |
2d21ac55 A |
3174 | /* reject empty or backwards blob */ |
3175 | error = EINVAL; | |
3176 | goto out; | |
3177 | } | |
3178 | ||
39037602 A |
3179 | if (ubc_cs_supports_multilevel_hash(blob)) { |
3180 | ubc_cs_convert_to_multilevel_hash(blob); | |
3181 | } | |
3182 | ||
2d21ac55 A |
3183 | vnode_lock(vp); |
3184 | if (! UBCINFOEXISTS(vp)) { | |
3185 | vnode_unlock(vp); | |
3186 | error = ENOENT; | |
3187 | goto out; | |
3188 | } | |
3189 | uip = vp->v_ubcinfo; | |
3190 | ||
3191 | /* check if this new blob overlaps with an existing blob */ | |
3192 | for (oblob = uip->cs_blobs; | |
3193 | oblob != NULL; | |
3194 | oblob = oblob->csb_next) { | |
3195 | off_t oblob_start_offset, oblob_end_offset; | |
3196 | ||
fe8ab488 A |
3197 | /* check for conflicting teamid */ |
3198 | if (blob->csb_platform_binary) { // all slices of a platform binary must also be platform binaries | |
3199 | if (!oblob->csb_platform_binary) { | |
3200 | vnode_unlock(vp); | |
3201 | error = EALREADY; | |
3202 | goto out; | |
3203 | } | |
3204 | } else if (blob->csb_teamid) { // all slices must carry the same team-id | |
3205 | if (oblob->csb_platform_binary || | |
3206 | oblob->csb_teamid == NULL || | |
3207 | strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) { | |
3208 | vnode_unlock(vp); | |
3209 | error = EALREADY; | |
3210 | goto out; | |
3211 | } | |
3212 | } else { // a slice with no team-id requires the other slices to have none either | |
3213 | if (oblob->csb_platform_binary || | |
3214 | oblob->csb_teamid != NULL) { | |
3215 | vnode_unlock(vp); | |
3216 | error = EALREADY; | |
3217 | goto out; | |
3218 | } | |
3219 | } | |
3220 | ||
2d21ac55 A |
3221 | oblob_start_offset = (oblob->csb_base_offset + |
3222 | oblob->csb_start_offset); | |
3223 | oblob_end_offset = (oblob->csb_base_offset + | |
3224 | oblob->csb_end_offset); | |
3225 | if (blob_start_offset >= oblob_end_offset || | |
3226 | blob_end_offset <= oblob_start_offset) { | |
3227 | /* no conflict with this existing blob */ | |
3228 | } else { | |
3229 | /* conflict ! */ | |
3230 | if (blob_start_offset == oblob_start_offset && | |
3231 | blob_end_offset == oblob_end_offset && | |
3232 | blob->csb_mem_size == oblob->csb_mem_size && | |
3233 | blob->csb_flags == oblob->csb_flags && | |
3234 | (blob->csb_cpu_type == CPU_TYPE_ANY || | |
3235 | oblob->csb_cpu_type == CPU_TYPE_ANY || | |
3236 | blob->csb_cpu_type == oblob->csb_cpu_type) && | |
3e170ce0 A |
3237 | !bcmp(blob->csb_cdhash, |
3238 | oblob->csb_cdhash, | |
3239 | CS_CDHASH_LEN)) { | |
2d21ac55 A |
3240 | /* |
3241 | * We already have this blob: | |
3242 | * we'll return success but | |
3243 | * throw away the new blob. | |
3244 | */ | |
3245 | if (oblob->csb_cpu_type == CPU_TYPE_ANY) { | |
3246 | /* | |
3247 | * The old blob matches this one | |
3248 | * but doesn't have any CPU type. | |
3249 | * Update it with whatever the caller | |
3250 | * provided this time. | |
3251 | */ | |
3252 | oblob->csb_cpu_type = cputype; | |
3253 | } | |
3254 | vnode_unlock(vp); | |
3e170ce0 A |
3255 | if (ret_blob) |
3256 | *ret_blob = oblob; | |
2d21ac55 A |
3257 | error = EAGAIN; |
3258 | goto out; | |
3259 | } else { | |
3260 | /* different blob: reject the new one */ | |
3261 | vnode_unlock(vp); | |
3262 | error = EALREADY; | |
3263 | goto out; | |
3264 | } | |
3265 | } | |
3266 | ||
3267 | } | |
3268 | ||
fe8ab488 | 3269 | |
2d21ac55 A |
3270 | /* mark this vnode's VM object as having "signed pages" */ |
3271 | kr = memory_object_signed(uip->ui_control, TRUE); | |
3272 | if (kr != KERN_SUCCESS) { | |
3273 | vnode_unlock(vp); | |
3274 | error = ENOENT; | |
3275 | goto out; | |
3276 | } | |
3277 | ||
15129b1c A |
3278 | if (uip->cs_blobs == NULL) { |
3279 | /* loading 1st blob: record the file's current "modify time" */ | |
3280 | record_mtime = TRUE; | |
3281 | } | |
3282 | ||
fe8ab488 A |
3283 | /* set the generation count for cs_blobs */ |
3284 | uip->cs_add_gen = cs_blob_generation_count; | |
3285 | ||
2d21ac55 A |
3286 | /* |
3287 | * Add this blob to the list of blobs for this vnode. | |
3288 | * We always add at the front of the list and we never remove a | |
3289 | * blob from the list, so ubc_cs_get_blobs() can return whatever | |
3290 | * the top of the list was and that list will remain valid | |
3291 | * while we validate a page, even after we release the vnode's lock. | |
3292 | */ | |
3293 | blob->csb_next = uip->cs_blobs; | |
3294 | uip->cs_blobs = blob; | |
3295 | ||
3296 | OSAddAtomic(+1, &cs_blob_count); | |
3297 | if (cs_blob_count > cs_blob_count_peak) { | |
3298 | cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */ | |
3299 | } | |
b0d623f7 A |
3300 | OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size); |
3301 | if ((SInt32) cs_blob_size > cs_blob_size_peak) { | |
3302 | cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */ | |
2d21ac55 | 3303 | } |
b0d623f7 A |
3304 | if ((UInt32) blob->csb_mem_size > cs_blob_size_max) { |
3305 | cs_blob_size_max = (UInt32) blob->csb_mem_size; | |
2d21ac55 A |
3306 | } |
3307 | ||
c331a0be | 3308 | if (cs_debug > 1) { |
2d21ac55 | 3309 | proc_t p; |
39236c6e | 3310 | const char *name = vnode_getname_printable(vp); |
2d21ac55 A |
3311 | p = current_proc(); |
3312 | printf("CODE SIGNING: proc %d(%s) " | |
3313 | "loaded %s signatures for file (%s) " | |
3314 | "range 0x%llx:0x%llx flags 0x%x\n", | |
3315 | p->p_pid, p->p_comm, | |
3316 | blob->csb_cpu_type == -1 ? "detached" : "embedded", | |
39236c6e | 3317 | name, |
2d21ac55 A |
3318 | blob->csb_base_offset + blob->csb_start_offset, |
3319 | blob->csb_base_offset + blob->csb_end_offset, | |
3320 | blob->csb_flags); | |
39236c6e | 3321 | vnode_putname_printable(name); |
2d21ac55 A |
3322 | } |
3323 | ||
2d21ac55 A |
3324 | vnode_unlock(vp); |
3325 | ||
15129b1c A |
3326 | if (record_mtime) { |
3327 | vnode_mtime(vp, &uip->cs_mtime, vfs_context_current()); | |
3328 | } | |
3329 | ||
3e170ce0 A |
3330 | if (ret_blob) |
3331 | *ret_blob = blob; | |
3332 | ||
2d21ac55 A |
3333 | error = 0; /* success ! */ |
3334 | ||
3335 | out: | |
3336 | if (error) { | |
fe8ab488 A |
3337 | if (cs_debug) |
3338 | printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error); | |
3339 | ||
2d21ac55 A |
3340 | /* we failed; release what we allocated */ |
3341 | if (blob) { | |
39037602 A |
3342 | if (blob->csb_mem_kaddr) { |
3343 | ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size); | |
3344 | blob->csb_mem_kaddr = 0; | |
3345 | } | |
3346 | if (blob->csb_entitlements != NULL) { | |
3347 | osobject_release(blob->csb_entitlements); | |
3348 | blob->csb_entitlements = NULL; | |
3349 | } | |
2d21ac55 A |
3350 | kfree(blob, sizeof (*blob)); |
3351 | blob = NULL; | |
3352 | } | |
2d21ac55 A |
3353 | } |
3354 | ||
3355 | if (error == EAGAIN) { | |
3356 | /* | |
3357 | * See above: error is EAGAIN if we were asked | |
3358 | * to add an existing blob again. We already freed the | |
3359 | * duplicate blob, so report success. | |
3360 | */ | |
3361 | error = 0; | |
2d21ac55 A |
3362 | } |
3363 | ||
3364 | return error; | |
91447636 A |
3365 | } |
3366 | ||
3e170ce0 A |
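/*
 * csvnode_print_debug():
 * Log the vnode's name and, for every attached code-signing blob, its
 * covered offset range, flags, platform-binary status and team identifier.
 */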
3367 | void |
3368 | csvnode_print_debug(struct vnode *vp) | |
3369 | { | |
3370 | const char *name = NULL; | |
3371 | struct ubc_info *uip; | |
3372 | struct cs_blob *blob; | |
3373 | ||
3374 | name = vnode_getname_printable(vp); | |
3375 | if (name) { | |
3376 | printf("csvnode: name: %s\n", name); | |
3377 | vnode_putname_printable(name); | |
3378 | } | |
3379 | ||
3380 | vnode_lock_spin(vp); | |
3381 | ||
3382 | if (! UBCINFOEXISTS(vp)) { | |
3383 | blob = NULL; | |
3384 | goto out; | |
3385 | } | |
3386 | ||
3387 | uip = vp->v_ubcinfo; | |
3388 | for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) { | |
3389 | printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n", | |
3390 | (unsigned long)blob->csb_start_offset, | |
3391 | (unsigned long)blob->csb_end_offset, | |
3392 | blob->csb_flags, | |
3393 | blob->csb_platform_binary ? "yes" : "no", | |
3394 | blob->csb_platform_path ? "yes" : "no", | |
3395 | blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>"); | |
3396 | } | |
3397 | ||
3398 | out: | |
3399 | vnode_unlock(vp); | |
3400 | ||
3401 | } | |
3402 | ||
2d21ac55 A |
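/*
 * ubc_cs_blob_get():
 * Return the first attached blob that matches the requested CPU type or
 * that covers the given file offset; pass -1 to ignore either criterion.
 * Returns NULL if no attached blob matches.
 */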
3403 | struct cs_blob * |
3404 | ubc_cs_blob_get( | |
3405 | struct vnode *vp, | |
3406 | cpu_type_t cputype, | |
3407 | off_t offset) | |
91447636 | 3408 | { |
2d21ac55 A |
3409 | struct ubc_info *uip; |
3410 | struct cs_blob *blob; | |
3411 | off_t offset_in_blob; | |
3412 | ||
3413 | vnode_lock_spin(vp); | |
3414 | ||
3415 | if (! UBCINFOEXISTS(vp)) { | |
3416 | blob = NULL; | |
3417 | goto out; | |
3418 | } | |
3419 | ||
3420 | uip = vp->v_ubcinfo; | |
3421 | for (blob = uip->cs_blobs; | |
3422 | blob != NULL; | |
3423 | blob = blob->csb_next) { | |
3424 | if (cputype != -1 && blob->csb_cpu_type == cputype) { | |
3425 | break; | |
3426 | } | |
3427 | if (offset != -1) { | |
3428 | offset_in_blob = offset - blob->csb_base_offset; | |
3429 | if (offset_in_blob >= blob->csb_start_offset && | |
3430 | offset_in_blob < blob->csb_end_offset) { | |
3431 | /* our offset is covered by this blob */ | |
3432 | break; | |
3433 | } | |
3434 | } | |
3435 | } | |
3436 | ||
3437 | out: | |
3438 | vnode_unlock(vp); | |
3439 | ||
3440 | return blob; | |
91447636 | 3441 | } |
2d21ac55 A |
3442 | |
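/*
 * ubc_cs_free():
 * Release all code-signing state attached to a ubc_info: deallocate each
 * blob's memory, release any entitlements object, update the global blob
 * counters, free the cs_blob structures and (if enabled) the validation
 * bitmap.
 */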
3443 | static void | |
3444 | ubc_cs_free( | |
3445 | struct ubc_info *uip) | |
91447636 | 3446 | { |
2d21ac55 A |
3447 | struct cs_blob *blob, *next_blob; |
3448 | ||
3449 | for (blob = uip->cs_blobs; | |
3450 | blob != NULL; | |
3451 | blob = next_blob) { | |
3452 | next_blob = blob->csb_next; | |
3e170ce0 | 3453 | if (blob->csb_mem_kaddr != 0) { |
593a1d5f A |
3454 | ubc_cs_blob_deallocate(blob->csb_mem_kaddr, |
3455 | blob->csb_mem_size); | |
2d21ac55 A |
3456 | blob->csb_mem_kaddr = 0; |
3457 | } | |
39037602 A |
3458 | if (blob->csb_entitlements != NULL) { |
3459 | osobject_release(blob->csb_entitlements); | |
3460 | blob->csb_entitlements = NULL; | |
593a1d5f | 3461 | } |
2d21ac55 | 3462 | OSAddAtomic(-1, &cs_blob_count); |
b0d623f7 | 3463 | OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size); |
2d21ac55 A |
3464 | kfree(blob, sizeof (*blob)); |
3465 | } | |
6d2010ae A |
3466 | #if CHECK_CS_VALIDATION_BITMAP |
3467 | ubc_cs_validation_bitmap_deallocate( uip->ui_vnode ); | |
3468 | #endif | |
2d21ac55 | 3469 | uip->cs_blobs = NULL; |
91447636 | 3470 | } |
2d21ac55 | 3471 | |
fe8ab488 A |
3472 | /* check cs blob generation on vnode |
3473 | * returns: | |
3474 | * 0 : Success, the cs_blob attached is current | |
3475 | * ENEEDAUTH : Generation count mismatch. Needs authentication again. | |
3476 | */ | |
3477 | int | |
3478 | ubc_cs_generation_check( | |
3479 | struct vnode *vp) | |
3480 | { | |
3481 | int retval = ENEEDAUTH; | |
3482 | ||
3483 | vnode_lock_spin(vp); | |
3484 | ||
3485 | if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) { | |
3486 | retval = 0; | |
3487 | } | |
3488 | ||
3489 | vnode_unlock(vp); | |
3490 | return retval; | |
3491 | } | |
3492 | ||
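/*
 * ubc_cs_blob_revalidate():
 * Re-run blob validation and the MAC policy hook on an already attached
 * blob, refresh its csb_flags, and stamp the vnode with the current
 * generation count on success (or clear it on failure).
 *
 * Illustrative sketch (not taken from this file) of how a caller holding
 * a vnode reference and a blob pointer might combine the generation check
 * with revalidation:
 *
 *	if (ubc_cs_generation_check(vp) == ENEEDAUTH) {
 *		error = ubc_cs_blob_revalidate(vp, blob, imgp, flags);
 *	}
 */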
3493 | int | |
3494 | ubc_cs_blob_revalidate( | |
3495 | struct vnode *vp, | |
c18c124e | 3496 | struct cs_blob *blob, |
39037602 A |
3497 | struct image_params *imgp, |
3498 | int flags | |
fe8ab488 A |
3499 | ) |
3500 | { | |
3501 | int error = 0; | |
fe8ab488 | 3502 | const CS_CodeDirectory *cd = NULL; |
39037602 | 3503 | const CS_GenericBlob *entitlements = NULL; |
fe8ab488 A |
3504 | assert(vp != NULL); |
3505 | assert(blob != NULL); | |
3506 | ||
39037602 | 3507 | error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, &cd, &entitlements); |
fe8ab488 A |
3508 | if (error) { |
3509 | if (cs_debug) { | |
3510 | printf("CODESIGNING: csblob invalid: %d\n", error); | |
3511 | } | |
3512 | goto out; | |
3513 | } | |
3514 | ||
39037602 A |
3515 | unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID; |
3516 | ||
fe8ab488 A |
3517 | /* callout to mac_vnode_check_signature */ |
3518 | #if CONFIG_MACF | |
39037602 | 3519 | error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, flags); |
fe8ab488 A |
3520 | if (cs_debug && error) { |
3521 | printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error); | |
3522 | } | |
39037602 A |
3523 | #else |
3524 | (void)flags; | |
fe8ab488 A |
3525 | #endif |
3526 | ||
3527 | /* update generation number if success */ | |
3528 | vnode_lock_spin(vp); | |
39037602 | 3529 | blob->csb_flags = cs_flags; |
fe8ab488 A |
3530 | if (UBCINFOEXISTS(vp)) { |
3531 | if (error == 0) | |
3532 | vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count; | |
3533 | else | |
3534 | vp->v_ubcinfo->cs_add_gen = 0; | |
3535 | } | |
3536 | ||
3537 | vnode_unlock(vp); | |
3538 | ||
3539 | out: | |
3540 | return error; | |
3541 | } | |
3542 | ||
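/*
 * cs_blob_reset_cache():
 * Invalidate every vnode's cached "signature already checked" state by
 * advancing the global generation counter; ubc_cs_generation_check()
 * will then report ENEEDAUTH until each blob is revalidated.
 */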
3543 | void | |
3544 | cs_blob_reset_cache() | |
3545 | { | |
3546 | /* incrementing an odd number by 2 ensures the counter never wraps to '0'. */ | |
3547 | OSAddAtomic(+2, &cs_blob_generation_count); | |
3548 | printf("Resetting cs_blob cache for all vnodes.\n"); | |
3549 | } | |
3550 | ||
2d21ac55 A |
3551 | struct cs_blob * |
3552 | ubc_get_cs_blobs( | |
3553 | struct vnode *vp) | |
91447636 | 3554 | { |
2d21ac55 A |
3555 | struct ubc_info *uip; |
3556 | struct cs_blob *blobs; | |
3557 | ||
b0d623f7 A |
3558 | /* |
3559 | * No need to take the vnode lock here. The caller must be holding | |
3560 | * a reference on the vnode (via a VM mapping or open file descriptor), | |
3561 | * so the vnode will not go away. The ubc_info stays until the vnode | |
3562 | * goes away. And we only modify "blobs" by adding to the head of the | |
3563 | * list. | |
3564 | * The ubc_info could go away entirely if the vnode gets reclaimed as | |
3565 | * part of a forced unmount. In the case of a code-signature validation | |
3566 | * during a page fault, the "paging_in_progress" reference on the VM | |
3567 | * object guarantees that the vnode pager (and the ubc_info) won't go | |
3568 | * away during the fault. | |
3569 | * Other callers need to protect against vnode reclaim by holding the | |
3570 | * vnode lock, for example. | |
3571 | */ | |
2d21ac55 A |
3572 | |
3573 | if (! UBCINFOEXISTS(vp)) { | |
3574 | blobs = NULL; | |
3575 | goto out; | |
3576 | } | |
3577 | ||
3578 | uip = vp->v_ubcinfo; | |
3579 | blobs = uip->cs_blobs; | |
3580 | ||
3581 | out: | |
2d21ac55 | 3582 | return blobs; |
91447636 | 3583 | } |
2d21ac55 | 3584 | |
15129b1c A |
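/*
 * ubc_get_cs_mtime():
 * Return the modification time recorded when the vnode's first
 * code-signing blob was attached, or zero if the vnode has no ubc_info.
 */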
3585 | void |
3586 | ubc_get_cs_mtime( | |
3587 | struct vnode *vp, | |
3588 | struct timespec *cs_mtime) | |
3589 | { | |
3590 | struct ubc_info *uip; | |
3591 | ||
3592 | if (! UBCINFOEXISTS(vp)) { | |
3593 | cs_mtime->tv_sec = 0; | |
3594 | cs_mtime->tv_nsec = 0; | |
3595 | return; | |
3596 | } | |
3597 | ||
3598 | uip = vp->v_ubcinfo; | |
3599 | cs_mtime->tv_sec = uip->cs_mtime.tv_sec; | |
3600 | cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec; | |
3601 | } | |
3602 | ||
2d21ac55 A |
3603 | unsigned long cs_validate_page_no_hash = 0; |
3604 | unsigned long cs_validate_page_bad_hash = 0; | |
39037602 A |
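/*
 * cs_validate_hash():
 * Validate one code-signing page: find the blob covering page_offset,
 * look up the expected hash in its Code Directory, hash the supplied
 * data (in two levels if the blob was converted to a multilevel hash)
 * and compare.  Returns TRUE only if a hash was found and checked;
 * *tainted reports mismatches (CS_VALIDATE_TAINTED) and pages extending
 * past codeLimit (CS_VALIDATE_NX), and *bytes_processed is the number
 * of bytes covered by this check.
 */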
3605 | static boolean_t |
3606 | cs_validate_hash( | |
3607 | struct cs_blob *blobs, | |
316670eb | 3608 | memory_object_t pager, |
2d21ac55 A |
3609 | memory_object_offset_t page_offset, |
3610 | const void *data, | |
39037602 | 3611 | vm_size_t *bytes_processed, |
c18c124e | 3612 | unsigned *tainted) |
91447636 | 3613 | { |
3e170ce0 A |
3614 | union cs_hash_union mdctx; |
3615 | struct cs_hash *hashtype = NULL; | |
3616 | unsigned char actual_hash[CS_HASH_MAX_SIZE]; | |
490019cf | 3617 | unsigned char expected_hash[CS_HASH_MAX_SIZE]; |
2d21ac55 | 3618 | boolean_t found_hash; |
39037602 | 3619 | struct cs_blob *blob; |
2d21ac55 | 3620 | const CS_CodeDirectory *cd; |
2d21ac55 A |
3621 | const unsigned char *hash; |
3622 | boolean_t validated; | |
3623 | off_t offset; /* page offset in the file */ | |
3624 | size_t size; | |
3625 | off_t codeLimit = 0; | |
3e170ce0 | 3626 | const char *lower_bound, *upper_bound; |
2d21ac55 | 3627 | vm_offset_t kaddr, blob_addr; |
2d21ac55 A |
3628 | |
3629 | /* retrieve the expected hash */ | |
3630 | found_hash = FALSE; | |
2d21ac55 A |
3631 | |
3632 | for (blob = blobs; | |
3633 | blob != NULL; | |
3634 | blob = blob->csb_next) { | |
3635 | offset = page_offset - blob->csb_base_offset; | |
3636 | if (offset < blob->csb_start_offset || | |
3637 | offset >= blob->csb_end_offset) { | |
3638 | /* our page is not covered by this blob */ | |
3639 | continue; | |
3640 | } | |
3641 | ||
39037602 | 3642 | /* blob data has been released */ |
2d21ac55 A |
3643 | kaddr = blob->csb_mem_kaddr; |
3644 | if (kaddr == 0) { | |
39037602 | 3645 | continue; |
2d21ac55 | 3646 | } |
39236c6e | 3647 | |
2d21ac55 | 3648 | blob_addr = kaddr + blob->csb_mem_offset; |
2d21ac55 A |
3649 | lower_bound = CAST_DOWN(char *, blob_addr); |
3650 | upper_bound = lower_bound + blob->csb_mem_size; | |
490019cf A |
3651 | |
3652 | cd = blob->csb_cd; | |
2d21ac55 | 3653 | if (cd != NULL) { |
3e170ce0 | 3654 | /* all CDs that have been injected are already validated */ | |
b0d623f7 | 3655 | |
3e170ce0 A |
3656 | hashtype = blob->csb_hashtype; |
3657 | if (hashtype == NULL) | |
3658 | panic("unknown hash type ?"); | |
3659 | if (hashtype->cs_digest_size > sizeof(actual_hash)) | |
3660 | panic("hash size too large"); | |
39037602 A |
3661 | if (offset & blob->csb_hash_pagemask) |
3662 | panic("offset not aligned to cshash boundary"); | |
3e170ce0 | 3663 | |
2d21ac55 | 3664 | codeLimit = ntohl(cd->codeLimit); |
39236c6e | 3665 | |
39037602 | 3666 | hash = hashes(cd, (uint32_t)(offset>>blob->csb_hash_pageshift), |
3e170ce0 | 3667 | hashtype->cs_size, |
2d21ac55 | 3668 | lower_bound, upper_bound); |
cf7d32b8 | 3669 | if (hash != NULL) { |
490019cf | 3670 | bcopy(hash, expected_hash, hashtype->cs_size); |
cf7d32b8 A |
3671 | found_hash = TRUE; |
3672 | } | |
2d21ac55 | 3673 | |
2d21ac55 A |
3674 | break; |
3675 | } | |
3676 | } | |
3677 | ||
3678 | if (found_hash == FALSE) { | |
3679 | /* | |
3680 | * We can't verify this page because there is no signature | |
3681 | * for it (yet). It's possible that this part of the object | |
3682 | * is not signed, or that signatures for that part have not | |
3683 | * been loaded yet. | |
3684 | * Report that the page has not been validated and let the | |
3685 | * caller decide if it wants to accept it or not. | |
3686 | */ | |
3687 | cs_validate_page_no_hash++; | |
3688 | if (cs_debug > 1) { | |
3689 | printf("CODE SIGNING: cs_validate_page: " | |
316670eb A |
3690 | "mobj %p off 0x%llx: no hash to validate !?\n", |
3691 | pager, page_offset); | |
2d21ac55 A |
3692 | } |
3693 | validated = FALSE; | |
c18c124e | 3694 | *tainted = 0; |
2d21ac55 | 3695 | } else { |
2d21ac55 | 3696 | |
c18c124e A |
3697 | *tainted = 0; |
3698 | ||
39037602 A |
3699 | size = blob->csb_hash_pagesize; |
3700 | *bytes_processed = size; | |
3701 | ||
fe8ab488 | 3702 | const uint32_t *asha1, *esha1; |
b0d623f7 | 3703 | if ((off_t)(offset + size) > codeLimit) { |
2d21ac55 A |
3704 | /* partial page at end of segment */ |
3705 | assert(offset < codeLimit); | |
39037602 | 3706 | size = (size_t) (codeLimit & blob->csb_hash_pagemask); |
c18c124e | 3707 | *tainted |= CS_VALIDATE_NX; |
2d21ac55 | 3708 | } |
3e170ce0 A |
3709 | |
3710 | hashtype->cs_init(&mdctx); | |
39037602 A |
3711 | |
3712 | if (blob->csb_hash_firstlevel_pagesize) { | |
3713 | const unsigned char *partial_data = (const unsigned char *)data; | |
3714 | size_t i; | |
3715 | for (i=0; i < size;) { | |
3716 | union cs_hash_union partialctx; | |
3717 | unsigned char partial_digest[CS_HASH_MAX_SIZE]; | |
3718 | size_t partial_size = MIN(size-i, blob->csb_hash_firstlevel_pagesize); | |
3719 | ||
3720 | hashtype->cs_init(&partialctx); | |
3721 | hashtype->cs_update(&partialctx, partial_data, partial_size); | |
3722 | hashtype->cs_final(partial_digest, &partialctx); | |
3723 | ||
3724 | /* Update cumulative multi-level hash */ | |
3725 | hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size); | |
3726 | partial_data = partial_data + partial_size; | |
3727 | i += partial_size; | |
3728 | } | |
3729 | } else { | |
3730 | hashtype->cs_update(&mdctx, data, size); | |
3731 | } | |
3e170ce0 | 3732 | hashtype->cs_final(actual_hash, &mdctx); |
2d21ac55 | 3733 | |
fe8ab488 A |
3734 | asha1 = (const uint32_t *) actual_hash; |
3735 | esha1 = (const uint32_t *) expected_hash; | |
3736 | ||
490019cf | 3737 | if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) { |
2d21ac55 A |
3738 | if (cs_debug) { |
3739 | printf("CODE SIGNING: cs_validate_page: " | |
fe8ab488 A |
3740 | "mobj %p off 0x%llx size 0x%lx: " |
3741 | "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != " | |
3742 | "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n", | |
3743 | pager, page_offset, size, | |
3744 | asha1[0], asha1[1], asha1[2], | |
3745 | asha1[3], asha1[4], | |
3746 | esha1[0], esha1[1], esha1[2], | |
3747 | esha1[3], esha1[4]); | |
2d21ac55 A |
3748 | } |
3749 | cs_validate_page_bad_hash++; | |
c18c124e | 3750 | *tainted |= CS_VALIDATE_TAINTED; |
2d21ac55 | 3751 | } else { |
39236c6e | 3752 | if (cs_debug > 10) { |
2d21ac55 | 3753 | printf("CODE SIGNING: cs_validate_page: " |
316670eb A |
3754 | "mobj %p off 0x%llx size 0x%lx: " |
3755 | "SHA1 OK\n", | |
3756 | pager, page_offset, size); | |
2d21ac55 | 3757 | } |
2d21ac55 A |
3758 | } |
3759 | validated = TRUE; | |
3760 | } | |
3761 | ||
3762 | return validated; | |
91447636 A |
3763 | } |
3764 | ||
39037602 A |
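/*
 * cs_validate_range():
 * Validate an arbitrary range by splitting it into code-signing pages
 * and running cs_validate_hash() on each; *tainted accumulates the
 * per-page flags and the result is TRUE only if every sub-range
 * validated.
 */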
3765 | boolean_t |
3766 | cs_validate_range( | |
3767 | struct vnode *vp, | |
3768 | memory_object_t pager, | |
3769 | memory_object_offset_t page_offset, | |
3770 | const void *data, | |
3771 | vm_size_t dsize, | |
3772 | unsigned *tainted) | |
3773 | { | |
3774 | vm_size_t offset_in_range; | |
3775 | boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */ | |
3776 | ||
3777 | struct cs_blob *blobs = ubc_get_cs_blobs(vp); | |
3778 | ||
3779 | *tainted = 0; | |
3780 | ||
3781 | for (offset_in_range = 0; | |
3782 | offset_in_range < dsize; | |
3783 | /* offset_in_range updated based on bytes processed */) { | |
3784 | unsigned subrange_tainted = 0; | |
3785 | boolean_t subrange_validated; | |
3786 | vm_size_t bytes_processed = 0; | |
3787 | ||
3788 | subrange_validated = cs_validate_hash(blobs, | |
3789 | pager, | |
3790 | page_offset + offset_in_range, | |
3791 | (const void *)((const char *)data + offset_in_range), | |
3792 | &bytes_processed, | |
3793 | &subrange_tainted); | |
3794 | ||
3795 | *tainted |= subrange_tainted; | |
3796 | ||
3797 | if (bytes_processed == 0) { | |
3798 | /* Cannot make forward progress, so return an error */ | |
3799 | all_subranges_validated = FALSE; | |
3800 | break; | |
3801 | } else if (subrange_validated == FALSE) { | |
3802 | all_subranges_validated = FALSE; | |
3803 | /* Keep going to detect other types of failures in subranges */ | |
3804 | } | |
3805 | ||
3806 | offset_in_range += bytes_processed; | |
3807 | } | |
3808 | ||
3809 | return all_subranges_validated; | |
3810 | } | |
3811 | ||
2d21ac55 A |
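/*
 * ubc_cs_getcdhash():
 * Copy the cdhash of the blob covering "offset" into the caller's
 * buffer.  Returns EBADEXEC if no attached blob covers that offset.
 */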
3812 | int |
3813 | ubc_cs_getcdhash( | |
3814 | vnode_t vp, | |
3815 | off_t offset, | |
3816 | unsigned char *cdhash) | |
3817 | { | |
b0d623f7 A |
3818 | struct cs_blob *blobs, *blob; |
3819 | off_t rel_offset; | |
3820 | int ret; | |
3821 | ||
3822 | vnode_lock(vp); | |
2d21ac55 A |
3823 | |
3824 | blobs = ubc_get_cs_blobs(vp); | |
3825 | for (blob = blobs; | |
3826 | blob != NULL; | |
3827 | blob = blob->csb_next) { | |
3828 | /* compute offset relative to this blob */ | |
3829 | rel_offset = offset - blob->csb_base_offset; | |
3830 | if (rel_offset >= blob->csb_start_offset && | |
3831 | rel_offset < blob->csb_end_offset) { | |
3832 | /* this blob does cover our "offset" ! */ | |
3833 | break; | |
3834 | } | |
3835 | } | |
3836 | ||
3837 | if (blob == NULL) { | |
3838 | /* we didn't find a blob covering "offset" */ | |
b0d623f7 A |
3839 | ret = EBADEXEC; /* XXX any better error ? */ |
3840 | } else { | |
3841 | /* copy that blob's cdhash */ | |
3e170ce0 | 3842 | bcopy(blob->csb_cdhash, cdhash, sizeof (blob->csb_cdhash)); |
b0d623f7 | 3843 | ret = 0; |
2d21ac55 A |
3844 | } |
3845 | ||
b0d623f7 | 3846 | vnode_unlock(vp); |
2d21ac55 | 3847 | |
b0d623f7 | 3848 | return ret; |
2d21ac55 | 3849 | } |
6d2010ae | 3850 | |
39037602 A |
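/*
 * ubc_cs_is_range_codesigned():
 * Report whether the file range [start, start+size) is fully covered by
 * a single attached code-signing blob.
 */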
3851 | boolean_t |
3852 | ubc_cs_is_range_codesigned( | |
3853 | vnode_t vp, | |
3854 | mach_vm_offset_t start, | |
3855 | mach_vm_size_t size) | |
3856 | { | |
3857 | struct cs_blob *csblob; | |
3858 | mach_vm_offset_t blob_start; | |
3859 | mach_vm_offset_t blob_end; | |
3860 | ||
3861 | if (vp == NULL) { | |
3862 | /* no file: no code signature */ | |
3863 | return FALSE; | |
3864 | } | |
3865 | if (size == 0) { | |
3866 | /* no range: no code signature */ | |
3867 | return FALSE; | |
3868 | } | |
3869 | if (start + size < start) { | |
3870 | /* overflow */ | |
3871 | return FALSE; | |
3872 | } | |
3873 | ||
3874 | csblob = ubc_cs_blob_get(vp, -1, start); | |
3875 | if (csblob == NULL) { | |
3876 | return FALSE; | |
3877 | } | |
3878 | ||
3879 | /* | |
3880 | * We currently check if the range is covered by a single blob, | |
3881 | * which should always be the case for the dyld shared cache. | |
3882 | * If we ever want to make this routine handle other cases, we | |
3883 | * would have to iterate if the blob does not cover the full range. | |
3884 | */ | |
3885 | blob_start = (mach_vm_offset_t) (csblob->csb_base_offset + | |
3886 | csblob->csb_start_offset); | |
3887 | blob_end = (mach_vm_offset_t) (csblob->csb_base_offset + | |
3888 | csblob->csb_end_offset); | |
3889 | if (blob_start > start || blob_end < (start + size)) { | |
3890 | /* range not fully covered by this code-signing blob */ | |
3891 | return FALSE; | |
3892 | } | |
3893 | ||
3894 | return TRUE; | |
3895 | } | |
3896 | ||
6d2010ae A |
3897 | #if CHECK_CS_VALIDATION_BITMAP |
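/* stob(): size in bytes -> size in bytes of a bitmap with one bit per page, rounded up */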
3898 | #define stob(s) ((atop_64((s)) + 07) >> 3) | |
3899 | extern boolean_t root_fs_upgrade_try; | |
3900 | ||
3901 | /* | |
3902 | * Should we use the code-sign bitmap to avoid repeated code-sign validation? | |
3903 | * Depends: | |
3904 | * a) Is the target vnode on the root filesystem? | |
3905 | * b) Has someone tried to mount the root filesystem read-write? | |
3906 | * If answers are (a) yes AND (b) no, then we can use the bitmap. | |
3907 | */ | |
3908 | #define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try) | |
3909 | kern_return_t | |
3910 | ubc_cs_validation_bitmap_allocate( | |
3911 | vnode_t vp) | |
3912 | { | |
3913 | kern_return_t kr = KERN_SUCCESS; | |
3914 | struct ubc_info *uip; | |
3915 | char *target_bitmap; | |
3916 | vm_object_size_t bitmap_size; | |
3917 | ||
3918 | if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) { | |
3919 | kr = KERN_INVALID_ARGUMENT; | |
3920 | } else { | |
3921 | uip = vp->v_ubcinfo; | |
3922 | ||
3923 | if ( uip->cs_valid_bitmap == NULL ) { | |
3924 | bitmap_size = stob(uip->ui_size); | |
3925 | target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size ); | |
3926 | if (target_bitmap == 0) { | |
3927 | kr = KERN_NO_SPACE; | |
3928 | } else { | |
3929 | kr = KERN_SUCCESS; | |
3930 | } | |
3931 | if( kr == KERN_SUCCESS ) { | |
3932 | memset( target_bitmap, 0, (size_t)bitmap_size); | |
3933 | uip->cs_valid_bitmap = (void*)target_bitmap; | |
3934 | uip->cs_valid_bitmap_size = bitmap_size; | |
3935 | } | |
3936 | } | |
3937 | } | |
3938 | return kr; | |
3939 | } | |
3940 | ||
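/*
 * ubc_cs_check_validation_bitmap():
 * Set, clear or test the "already validated" bit for the page containing
 * "offset", according to optype (CS_BITMAP_SET / CS_BITMAP_CLEAR /
 * CS_BITMAP_CHECK).  CS_BITMAP_CHECK returns KERN_FAILURE when the bit
 * is not set.
 */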
3941 | kern_return_t | |
3942 | ubc_cs_check_validation_bitmap ( | |
3943 | vnode_t vp, | |
3944 | memory_object_offset_t offset, | |
3945 | int optype) | |
3946 | { | |
3947 | kern_return_t kr = KERN_SUCCESS; | |
3948 | ||
3949 | if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) { | |
3950 | kr = KERN_INVALID_ARGUMENT; | |
3951 | } else { | |
3952 | struct ubc_info *uip = vp->v_ubcinfo; | |
3953 | char *target_bitmap = uip->cs_valid_bitmap; | |
3954 | ||
3955 | if ( target_bitmap == NULL ) { | |
3956 | kr = KERN_INVALID_ARGUMENT; | |
3957 | } else { | |
3958 | uint64_t bit, byte; | |
3959 | bit = atop_64( offset ); | |
3960 | byte = bit >> 3; | |
3961 | ||
3962 | if ( byte > uip->cs_valid_bitmap_size ) { | |
3963 | kr = KERN_INVALID_ARGUMENT; | |
3964 | } else { | |
3965 | ||
3966 | if (optype == CS_BITMAP_SET) { | |
3967 | target_bitmap[byte] |= (1 << (bit & 07)); | |
3968 | kr = KERN_SUCCESS; | |
3969 | } else if (optype == CS_BITMAP_CLEAR) { | |
3970 | target_bitmap[byte] &= ~(1 << (bit & 07)); | |
3971 | kr = KERN_SUCCESS; | |
3972 | } else if (optype == CS_BITMAP_CHECK) { | |
3973 | if ( target_bitmap[byte] & (1 << (bit & 07))) { | |
3974 | kr = KERN_SUCCESS; | |
3975 | } else { | |
3976 | kr = KERN_FAILURE; | |
3977 | } | |
3978 | } | |
3979 | } | |
3980 | } | |
3981 | } | |
3982 | return kr; | |
3983 | } | |
3984 | ||
3985 | void | |
3986 | ubc_cs_validation_bitmap_deallocate( | |
3987 | vnode_t vp) | |
3988 | { | |
3989 | struct ubc_info *uip; | |
3990 | void *target_bitmap; | |
3991 | vm_object_size_t bitmap_size; | |
3992 | ||
3993 | if ( UBCINFOEXISTS(vp)) { | |
3994 | uip = vp->v_ubcinfo; | |
3995 | ||
3996 | if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) { | |
3997 | bitmap_size = uip->cs_valid_bitmap_size; | |
3998 | kfree( target_bitmap, (vm_size_t) bitmap_size ); | |
3999 | uip->cs_valid_bitmap = NULL; | |
4000 | } | |
4001 | } | |
4002 | } | |
4003 | #else | |
4004 | kern_return_t ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){ | |
4005 | return KERN_INVALID_ARGUMENT; | |
4006 | } | |
4007 | ||
4008 | kern_return_t ubc_cs_check_validation_bitmap( | |
4009 | __unused struct vnode *vp, | |
4010 | __unused memory_object_offset_t offset, | |
4011 | __unused int optype){ | |
4012 | ||
4013 | return KERN_INVALID_ARGUMENT; | |
4014 | } | |
4015 | ||
4016 | void ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){ | |
4017 | return; | |
4018 | } | |
4019 | #endif /* CHECK_CS_VALIDATION_BITMAP */ |