1 | /* | |
2 | * Copyright (c) 2008-2018 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | #if !FS_COMPRESSION | |
29 | ||
30 | /* We need these symbols even though compression is turned off */ | |
31 | ||
32 | #define UNUSED_SYMBOL(x) asm(".global _" #x "\n.set _" #x ", 0\n"); | |
33 | ||
34 | UNUSED_SYMBOL(register_decmpfs_decompressor) | |
35 | UNUSED_SYMBOL(unregister_decmpfs_decompressor) | |
36 | UNUSED_SYMBOL(decmpfs_init) | |
37 | UNUSED_SYMBOL(decmpfs_read_compressed) | |
38 | UNUSED_SYMBOL(decmpfs_cnode_cmp_type) | |
39 | UNUSED_SYMBOL(decmpfs_cnode_get_vnode_state) | |
40 | UNUSED_SYMBOL(decmpfs_cnode_get_vnode_cached_size) | |
41 | UNUSED_SYMBOL(decmpfs_cnode_get_vnode_cached_nchildren) | |
42 | UNUSED_SYMBOL(decmpfs_cnode_get_vnode_cached_total_size) | |
43 | UNUSED_SYMBOL(decmpfs_lock_compressed_data) | |
44 | UNUSED_SYMBOL(decmpfs_cnode_free) | |
45 | UNUSED_SYMBOL(decmpfs_cnode_alloc) | |
46 | UNUSED_SYMBOL(decmpfs_cnode_destroy) | |
47 | UNUSED_SYMBOL(decmpfs_decompress_file) | |
48 | UNUSED_SYMBOL(decmpfs_unlock_compressed_data) | |
49 | UNUSED_SYMBOL(decmpfs_cnode_init) | |
50 | UNUSED_SYMBOL(decmpfs_cnode_set_vnode_state) | |
51 | UNUSED_SYMBOL(decmpfs_hides_xattr) | |
52 | UNUSED_SYMBOL(decmpfs_ctx) | |
53 | UNUSED_SYMBOL(decmpfs_file_is_compressed) | |
54 | UNUSED_SYMBOL(decmpfs_update_attributes) | |
55 | UNUSED_SYMBOL(decmpfs_hides_rsrc) | |
56 | UNUSED_SYMBOL(decmpfs_pagein_compressed) | |
57 | UNUSED_SYMBOL(decmpfs_validate_compressed_file) | |
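/*
 * Editor's sketch (not part of the original source): the UNUSED_SYMBOL()
 * macro above emits an assembler directive that defines each name as a
 * global symbol with the value 0, so code that links against the decmpfs
 * entry points still links when FS_COMPRESSION is compiled out.  For
 * example, UNUSED_SYMBOL(decmpfs_init) expands to roughly:
 */
#if 0
asm(".global _decmpfs_init\n.set _decmpfs_init, 0\n");
#endif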
58 | ||
59 | #else /* FS_COMPRESSION */ | |
60 | #include <sys/kernel.h> | |
61 | #include <sys/vnode_internal.h> | |
62 | #include <sys/file_internal.h> | |
63 | #include <sys/stat.h> | |
64 | #include <sys/fcntl.h> | |
65 | #include <sys/xattr.h> | |
66 | #include <sys/namei.h> | |
67 | #include <sys/user.h> | |
68 | #include <sys/mount_internal.h> | |
69 | #include <sys/ubc.h> | |
70 | #include <sys/decmpfs.h> | |
71 | #include <sys/uio_internal.h> | |
72 | #include <libkern/OSByteOrder.h> | |
73 | #include <libkern/section_keywords.h> | |
74 | ||
75 | #include <ptrauth.h> | |
76 | ||
77 | #pragma mark --- debugging --- | |
78 | ||
79 | #define COMPRESSION_DEBUG 0 | |
80 | #define COMPRESSION_DEBUG_VERBOSE 0 | |
81 | #define MALLOC_DEBUG 0 | |
82 | ||
83 | #if COMPRESSION_DEBUG | |
84 | static char* | |
85 | vnpath(vnode_t vp, char *path, int len) | |
86 | { | |
87 | int origlen = len; | |
88 | path[0] = 0; | |
89 | vn_getpath(vp, path, &len); | |
90 | path[origlen - 1] = 0; | |
91 | return path; | |
92 | } | |
93 | #endif | |
94 | ||
95 | #define ErrorLog(x, args...) \ | |
96 | printf("%s:%d:%s: " x, __FILE_NAME__, __LINE__, __FUNCTION__, ## args) | |
97 | #if COMPRESSION_DEBUG | |
98 | #define ErrorLogWithPath(x, args...) do { \ | |
99 | char *path = zalloc(ZV_NAMEI); \ | |
100 | printf("%s:%d:%s: %s: " x, __FILE_NAME__, __LINE__, __FUNCTION__, \ | |
101 | vnpath(vp, path, PATH_MAX), ## args); \ | |
102 | zfree(ZV_NAMEI, path); \ | |
103 | } while(0) | |
104 | #else | |
105 | #define ErrorLogWithPath(x, args...) do { \ | |
106 | 	(void)vp; \ | |
107 | printf("%s:%d:%s: %s: " x, __FILE_NAME__, __LINE__, __FUNCTION__, \ | |
108 | "<private>", ## args); \ | |
109 | } while(0) | |
110 | #endif | |
111 | ||
112 | #if COMPRESSION_DEBUG | |
113 | #define DebugLog ErrorLog | |
114 | #define DebugLogWithPath ErrorLogWithPath | |
115 | #else | |
116 | #define DebugLog(x...) do { } while(0) | |
117 | #define DebugLogWithPath(x...) do { } while(0) | |
118 | #endif | |
119 | ||
120 | #if COMPRESSION_DEBUG_VERBOSE | |
121 | #define VerboseLog ErrorLog | |
122 | #define VerboseLogWithPath ErrorLogWithPath | |
123 | #else | |
124 | #define VerboseLog(x...) do { } while(0) | |
125 | #define VerboseLogWithPath(x...) do { } while(0) | |
126 | #endif | |
127 | ||
128 | #pragma mark --- globals --- | |
129 | ||
130 | static LCK_GRP_DECLARE(decmpfs_lockgrp, "VFSCOMP"); | |
131 | static LCK_RW_DECLARE(decompressorsLock, &decmpfs_lockgrp); | |
132 | static LCK_MTX_DECLARE(decompress_channel_mtx, &decmpfs_lockgrp); | |
133 | ||
134 | static const decmpfs_registration *decompressors[CMP_MAX]; /* the registered compressors */ | |
135 | static int decompress_channel; /* channel used by decompress_file to wake up waiters */ | |
136 | ||
137 | vfs_context_t decmpfs_ctx; | |
138 | ||
139 | #pragma mark --- decmp_get_func --- | |
140 | ||
141 | #define offsetof_func(func) ((uintptr_t)offsetof(decmpfs_registration, func)) | |
142 | ||
143 | static void * | |
144 | _func_from_offset(uint32_t type, uintptr_t offset, uint32_t discriminator) | |
145 | { | |
146 | /* get the function at the given offset in the registration for the given type */ | |
147 | const decmpfs_registration *reg = decompressors[type]; | |
148 | ||
149 | switch (reg->decmpfs_registration) { | |
150 | case DECMPFS_REGISTRATION_VERSION_V1: | |
151 | if (offset > offsetof_func(free_data)) { | |
152 | return NULL; | |
153 | } | |
154 | break; | |
155 | case DECMPFS_REGISTRATION_VERSION_V3: | |
156 | if (offset > offsetof_func(get_flags)) { | |
157 | return NULL; | |
158 | } | |
159 | break; | |
160 | default: | |
161 | return NULL; | |
162 | } | |
163 | ||
164 | void *ptr = *(void * const *)((const void *)reg + offset); | |
165 | if (ptr != NULL) { | |
166 | /* Resign as a function-in-void* */ | |
167 | ptr = ptrauth_auth_and_resign(ptr, ptrauth_key_asia, discriminator, ptrauth_key_asia, 0); | |
168 | } | |
169 | return ptr; | |
170 | } | |
171 | ||
172 | extern void IOServicePublishResource( const char * property, boolean_t value ); | |
173 | extern boolean_t IOServiceWaitForMatchingResource( const char * property, uint64_t timeout ); | |
174 | extern boolean_t IOCatalogueMatchingDriversPresent( const char * property ); | |
175 | ||
176 | static void * | |
177 | _decmp_get_func(vnode_t vp, uint32_t type, uintptr_t offset, uint32_t discriminator) | |
178 | { | |
179 | /* | |
180 | * this function should be called while holding a shared lock to decompressorsLock, | |
181 | * and will return with the lock held | |
182 | */ | |
183 | ||
184 | if (type >= CMP_MAX) { | |
185 | return NULL; | |
186 | } | |
187 | ||
188 | if (decompressors[type] != NULL) { | |
189 | // the compressor has already registered but the function might be null | |
190 | return _func_from_offset(type, offset, discriminator); | |
191 | } | |
192 | ||
193 | // does IOKit know about a kext that is supposed to provide this type? | |
194 | char providesName[80]; | |
195 | snprintf(providesName, sizeof(providesName), "com.apple.AppleFSCompression.providesType%u", type); | |
196 | if (IOCatalogueMatchingDriversPresent(providesName)) { | |
197 | // there is a kext that says it will register for this type, so let's wait for it | |
198 | char resourceName[80]; | |
199 | uint64_t delay = 10000000ULL; // 10 milliseconds. | |
200 | snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", type); | |
201 | ErrorLogWithPath("waiting for %s\n", resourceName); | |
202 | while (decompressors[type] == NULL) { | |
203 | lck_rw_unlock_shared(&decompressorsLock); // we have to unlock to allow the kext to register | |
204 | if (IOServiceWaitForMatchingResource(resourceName, delay)) { | |
205 | lck_rw_lock_shared(&decompressorsLock); | |
206 | break; | |
207 | } | |
208 | if (!IOCatalogueMatchingDriversPresent(providesName)) { | |
209 | // | |
210 | ErrorLogWithPath("the kext with %s is no longer present\n", providesName); | |
211 | lck_rw_lock_shared(&decompressorsLock); | |
212 | break; | |
213 | } | |
214 | ErrorLogWithPath("still waiting for %s\n", resourceName); | |
215 | delay *= 2; | |
216 | lck_rw_lock_shared(&decompressorsLock); | |
217 | } | |
218 | // IOKit says the kext is loaded, so it should be registered too! | |
219 | if (decompressors[type] == NULL) { | |
220 | ErrorLogWithPath("we found %s, but the type still isn't registered\n", providesName); | |
221 | return NULL; | |
222 | } | |
223 | // it's now registered, so let's return the function | |
224 | return _func_from_offset(type, offset, discriminator); | |
225 | } | |
226 | ||
227 | // the compressor hasn't registered, so it never will unless someone manually kextloads it | |
228 | ErrorLogWithPath("tried to access a compressed file of unregistered type %d\n", type); | |
229 | return NULL; | |
230 | } | |
231 | ||
232 | #define decmp_get_func(vp, type, func) (typeof(decompressors[0]->func))_decmp_get_func(vp, type, offsetof_func(func), ptrauth_function_pointer_type_discriminator(typeof(decompressors[0]->func))) | |
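/*
 * Editor's sketch (not original source): how the decmp_get_func() macro above
 * is typically used by the call sites later in this file.  The lookup and the
 * call are both made under the shared decompressorsLock so the registration
 * cannot disappear between them.
 */
#if 0
static int
example_call_fetch(vnode_t vp, decmpfs_header *hdr, off_t offset,
    user_ssize_t size, decmpfs_vector *vec, uint64_t *bytes_read)
{
	int err;

	lck_rw_lock_shared(&decompressorsLock);
	decmpfs_fetch_uncompressed_data_func fetch =
	    decmp_get_func(vp, hdr->compression_type, fetch);
	if (fetch != NULL) {
		err = fetch(vp, decmpfs_ctx, hdr, offset, size, 1, vec, bytes_read);
	} else {
		err = ENOTSUP;
	}
	lck_rw_unlock_shared(&decompressorsLock);
	return err;
}
#endif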
233 | ||
234 | #pragma mark --- utilities --- | |
235 | ||
236 | #if COMPRESSION_DEBUG | |
237 | static int | |
238 | vnsize(vnode_t vp, uint64_t *size) | |
239 | { | |
240 | struct vnode_attr va; | |
241 | VATTR_INIT(&va); | |
242 | VATTR_WANTED(&va, va_data_size); | |
243 | int error = vnode_getattr(vp, &va, decmpfs_ctx); | |
244 | if (error != 0) { | |
245 | ErrorLogWithPath("vnode_getattr err %d\n", error); | |
246 | return error; | |
247 | } | |
248 | *size = va.va_data_size; | |
249 | return 0; | |
250 | } | |
251 | #endif /* COMPRESSION_DEBUG */ | |
252 | ||
253 | #pragma mark --- cnode routines --- | |
254 | ||
255 | ZONE_DECLARE(decmpfs_cnode_zone, "decmpfs_cnode", | |
256 | sizeof(struct decmpfs_cnode), ZC_NONE); | |
257 | ||
258 | decmpfs_cnode * | |
259 | decmpfs_cnode_alloc(void) | |
260 | { | |
261 | return zalloc(decmpfs_cnode_zone); | |
262 | } | |
263 | ||
264 | void | |
265 | decmpfs_cnode_free(decmpfs_cnode *dp) | |
266 | { | |
267 | zfree(decmpfs_cnode_zone, dp); | |
268 | } | |
269 | ||
270 | void | |
271 | decmpfs_cnode_init(decmpfs_cnode *cp) | |
272 | { | |
273 | memset(cp, 0, sizeof(*cp)); | |
274 | lck_rw_init(&cp->compressed_data_lock, &decmpfs_lockgrp, NULL); | |
275 | } | |
276 | ||
277 | void | |
278 | decmpfs_cnode_destroy(decmpfs_cnode *cp) | |
279 | { | |
280 | lck_rw_destroy(&cp->compressed_data_lock, &decmpfs_lockgrp); | |
281 | } | |
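/*
 * Editor's sketch (not original source): the expected decmpfs_cnode lifecycle
 * from a client filesystem's point of view.  "struct example_fsnode" is a
 * hypothetical per-file structure used only for this illustration.
 */
#if 0
struct example_fsnode {
	decmpfs_cnode *efn_decmp;
};

static void
example_attach_decmp(struct example_fsnode *np)
{
	np->efn_decmp = decmpfs_cnode_alloc();
	decmpfs_cnode_init(np->efn_decmp);
}

static void
example_detach_decmp(struct example_fsnode *np)
{
	decmpfs_cnode_destroy(np->efn_decmp);
	decmpfs_cnode_free(np->efn_decmp);
	np->efn_decmp = NULL;
}
#endif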
282 | ||
283 | bool | |
284 | decmpfs_trylock_compressed_data(decmpfs_cnode *cp, int exclusive) | |
285 | { | |
286 | void *thread = current_thread(); | |
287 | bool retval = false; | |
288 | ||
289 | if (cp->lockowner == thread) { | |
290 | /* this thread is already holding an exclusive lock, so bump the count */ | |
291 | cp->lockcount++; | |
292 | retval = true; | |
293 | } else if (exclusive) { | |
294 | if ((retval = lck_rw_try_lock_exclusive(&cp->compressed_data_lock))) { | |
295 | cp->lockowner = thread; | |
296 | cp->lockcount = 1; | |
297 | } | |
298 | } else { | |
299 | if ((retval = lck_rw_try_lock_shared(&cp->compressed_data_lock))) { | |
300 | cp->lockowner = (void *)-1; | |
301 | } | |
302 | } | |
303 | return retval; | |
304 | } | |
305 | ||
306 | void | |
307 | decmpfs_lock_compressed_data(decmpfs_cnode *cp, int exclusive) | |
308 | { | |
309 | void *thread = current_thread(); | |
310 | ||
311 | if (cp->lockowner == thread) { | |
312 | /* this thread is already holding an exclusive lock, so bump the count */ | |
313 | cp->lockcount++; | |
314 | } else if (exclusive) { | |
315 | lck_rw_lock_exclusive(&cp->compressed_data_lock); | |
316 | cp->lockowner = thread; | |
317 | cp->lockcount = 1; | |
318 | } else { | |
319 | lck_rw_lock_shared(&cp->compressed_data_lock); | |
320 | cp->lockowner = (void *)-1; | |
321 | } | |
322 | } | |
323 | ||
324 | void | |
325 | decmpfs_unlock_compressed_data(decmpfs_cnode *cp, __unused int exclusive) | |
326 | { | |
327 | void *thread = current_thread(); | |
328 | ||
329 | if (cp->lockowner == thread) { | |
330 | /* this thread is holding an exclusive lock, so decrement the count */ | |
331 | if ((--cp->lockcount) > 0) { | |
332 | /* the caller still has outstanding locks, so we're done */ | |
333 | return; | |
334 | } | |
335 | cp->lockowner = NULL; | |
336 | } | |
337 | ||
338 | lck_rw_done(&cp->compressed_data_lock); | |
339 | } | |
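/*
 * Editor's sketch (not original source): the compressed-data lock is
 * recursive for exclusive holders only.  A thread that already owns it
 * exclusively may take it again (only lockcount is bumped) and must unlock
 * the same number of times; shared holders are never tracked as owners.
 */
#if 0
static void
example_nested_exclusive(decmpfs_cnode *cp)
{
	decmpfs_lock_compressed_data(cp, 1);    /* takes the rw lock exclusive, lockcount = 1 */
	decmpfs_lock_compressed_data(cp, 1);    /* same thread: lockcount = 2, no rw lock call */
	decmpfs_unlock_compressed_data(cp, 1);  /* lockcount = 1, rw lock still held */
	decmpfs_unlock_compressed_data(cp, 1);  /* lockcount = 0, rw lock released */
}
#endif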
340 | ||
341 | uint32_t | |
342 | decmpfs_cnode_get_vnode_state(decmpfs_cnode *cp) | |
343 | { | |
344 | return cp->cmp_state; | |
345 | } | |
346 | ||
347 | void | |
348 | decmpfs_cnode_set_vnode_state(decmpfs_cnode *cp, uint32_t state, int skiplock) | |
349 | { | |
350 | if (!skiplock) { | |
351 | decmpfs_lock_compressed_data(cp, 1); | |
352 | } | |
353 | cp->cmp_state = (uint8_t)state; | |
354 | if (state == FILE_TYPE_UNKNOWN) { | |
355 | /* clear out the compression type too */ | |
356 | cp->cmp_type = 0; | |
357 | } | |
358 | if (!skiplock) { | |
359 | decmpfs_unlock_compressed_data(cp, 1); | |
360 | } | |
361 | } | |
362 | ||
363 | static void | |
364 | decmpfs_cnode_set_vnode_cmp_type(decmpfs_cnode *cp, uint32_t cmp_type, int skiplock) | |
365 | { | |
366 | if (!skiplock) { | |
367 | decmpfs_lock_compressed_data(cp, 1); | |
368 | } | |
369 | cp->cmp_type = cmp_type; | |
370 | if (!skiplock) { | |
371 | decmpfs_unlock_compressed_data(cp, 1); | |
372 | } | |
373 | } | |
374 | ||
375 | static void | |
376 | decmpfs_cnode_set_vnode_minimal_xattr(decmpfs_cnode *cp, int minimal_xattr, int skiplock) | |
377 | { | |
378 | if (!skiplock) { | |
379 | decmpfs_lock_compressed_data(cp, 1); | |
380 | } | |
381 | cp->cmp_minimal_xattr = !!minimal_xattr; | |
382 | if (!skiplock) { | |
383 | decmpfs_unlock_compressed_data(cp, 1); | |
384 | } | |
385 | } | |
386 | ||
387 | uint64_t | |
388 | decmpfs_cnode_get_vnode_cached_size(decmpfs_cnode *cp) | |
389 | { | |
390 | return cp->uncompressed_size; | |
391 | } | |
392 | ||
393 | uint64_t | |
394 | decmpfs_cnode_get_vnode_cached_nchildren(decmpfs_cnode *cp) | |
395 | { | |
396 | return cp->nchildren; | |
397 | } | |
398 | ||
399 | uint64_t | |
400 | decmpfs_cnode_get_vnode_cached_total_size(decmpfs_cnode *cp) | |
401 | { | |
402 | return cp->total_size; | |
403 | } | |
404 | ||
405 | void | |
406 | decmpfs_cnode_set_vnode_cached_size(decmpfs_cnode *cp, uint64_t size) | |
407 | { | |
408 | while (1) { | |
409 | uint64_t old = cp->uncompressed_size; | |
410 | if (OSCompareAndSwap64(old, size, (UInt64*)&cp->uncompressed_size)) { | |
411 | return; | |
412 | } else { | |
413 | /* failed to write our value, so loop */ | |
414 | } | |
415 | } | |
416 | } | |
417 | ||
418 | void | |
419 | decmpfs_cnode_set_vnode_cached_nchildren(decmpfs_cnode *cp, uint64_t nchildren) | |
420 | { | |
421 | while (1) { | |
422 | uint64_t old = cp->nchildren; | |
423 | if (OSCompareAndSwap64(old, nchildren, (UInt64*)&cp->nchildren)) { | |
424 | return; | |
425 | } else { | |
426 | /* failed to write our value, so loop */ | |
427 | } | |
428 | } | |
429 | } | |
430 | ||
431 | void | |
432 | decmpfs_cnode_set_vnode_cached_total_size(decmpfs_cnode *cp, uint64_t total_sz) | |
433 | { | |
434 | while (1) { | |
435 | uint64_t old = cp->total_size; | |
436 | if (OSCompareAndSwap64(old, total_sz, (UInt64*)&cp->total_size)) { | |
437 | return; | |
438 | } else { | |
439 | /* failed to write our value, so loop */ | |
440 | } | |
441 | } | |
442 | } | |
443 | ||
444 | static uint64_t | |
445 | decmpfs_cnode_get_decompression_flags(decmpfs_cnode *cp) | |
446 | { | |
447 | return cp->decompression_flags; | |
448 | } | |
449 | ||
450 | static void | |
451 | decmpfs_cnode_set_decompression_flags(decmpfs_cnode *cp, uint64_t flags) | |
452 | { | |
453 | while (1) { | |
454 | uint64_t old = cp->decompression_flags; | |
455 | if (OSCompareAndSwap64(old, flags, (UInt64*)&cp->decompression_flags)) { | |
456 | return; | |
457 | } else { | |
458 | /* failed to write our value, so loop */ | |
459 | } | |
460 | } | |
461 | } | |
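/*
 * Editor's sketch (not original source): the cached-size, nchildren,
 * total-size and decompression-flags setters above all use the same
 * lock-free pattern, which keeps a 64-bit store atomic without taking the
 * cnode lock: read the current value, attempt a compare-and-swap, and retry
 * if another thread raced in between.
 */
#if 0
static void
example_cas_store64(volatile UInt64 *field, uint64_t value)
{
	uint64_t old;
	do {
		old = *field;
	} while (!OSCompareAndSwap64(old, value, field));
}
#endif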
462 | ||
463 | uint32_t | |
464 | decmpfs_cnode_cmp_type(decmpfs_cnode *cp) | |
465 | { | |
466 | return cp->cmp_type; | |
467 | } | |
468 | ||
469 | #pragma mark --- decmpfs state routines --- | |
470 | ||
471 | static int | |
472 | decmpfs_fetch_compressed_header(vnode_t vp, decmpfs_cnode *cp, decmpfs_header **hdrOut, int returnInvalid, size_t *hdr_size) | |
473 | { | |
474 | /* | |
475 | * fetches vp's compression xattr, converting it into a decmpfs_header; returns 0 or errno | |
476 | * if returnInvalid == 1, returns the header even if the type was invalid (out of range), | |
477 | 	 * and returns ERANGE in that case | |
478 | */ | |
479 | ||
480 | size_t read_size = 0; | |
481 | size_t attr_size = 0; | |
482 | size_t alloc_size = 0; | |
483 | uio_t attr_uio = NULL; | |
484 | int err = 0; | |
485 | char *data = NULL; | |
486 | const bool no_additional_data = ((cp != NULL) | |
487 | && (cp->cmp_type != 0) | |
488 | && (cp->cmp_minimal_xattr != 0)); | |
489 | char uio_buf[UIO_SIZEOF(1)]; | |
490 | decmpfs_header *hdr = NULL; | |
491 | ||
492 | /* | |
493 | * Trace the following parameters on entry with event-id 0x03120004 | |
494 | * | |
495 | * @vp->v_id: vnode-id for which to fetch compressed header. | |
496 | 	 * @no_additional_data: if true, the xattr didn't have any extra data. | |
497 | * @returnInvalid: return the header even though the type is out of range. | |
498 | */ | |
499 | DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FETCH_COMPRESSED_HEADER, vp->v_id, | |
500 | no_additional_data, returnInvalid); | |
501 | ||
502 | if (no_additional_data) { | |
503 | /* this file's xattr didn't have any extra data when we fetched it, so we can synthesize a header from the data in the cnode */ | |
504 | ||
505 | alloc_size = sizeof(decmpfs_header); | |
506 | data = kheap_alloc(KHEAP_TEMP, alloc_size, Z_WAITOK); | |
507 | if (!data) { | |
508 | err = ENOMEM; | |
509 | goto out; | |
510 | } | |
511 | hdr = (decmpfs_header*)data; | |
512 | hdr->attr_size = sizeof(decmpfs_disk_header); | |
513 | hdr->compression_magic = DECMPFS_MAGIC; | |
514 | hdr->compression_type = cp->cmp_type; | |
515 | if (hdr->compression_type == DATALESS_PKG_CMPFS_TYPE) { | |
516 | if (!vnode_isdir(vp)) { | |
517 | err = EINVAL; | |
518 | goto out; | |
519 | } | |
520 | hdr->_size.value = DECMPFS_PKG_VALUE_FROM_SIZE_COUNT( | |
521 | decmpfs_cnode_get_vnode_cached_size(cp), | |
522 | decmpfs_cnode_get_vnode_cached_nchildren(cp)); | |
523 | } else if (vnode_isdir(vp)) { | |
524 | hdr->_size.value = decmpfs_cnode_get_vnode_cached_nchildren(cp); | |
525 | } else { | |
526 | hdr->_size.value = decmpfs_cnode_get_vnode_cached_size(cp); | |
527 | } | |
528 | } else { | |
529 | /* figure out how big the xattr is on disk */ | |
530 | err = vn_getxattr(vp, DECMPFS_XATTR_NAME, NULL, &attr_size, XATTR_NOSECURITY, decmpfs_ctx); | |
531 | if (err != 0) { | |
532 | goto out; | |
533 | } | |
534 | alloc_size = attr_size + sizeof(hdr->attr_size); | |
535 | ||
536 | if (attr_size < sizeof(decmpfs_disk_header) || attr_size > MAX_DECMPFS_XATTR_SIZE) { | |
537 | err = EINVAL; | |
538 | goto out; | |
539 | } | |
540 | ||
541 | /* allocation includes space for the extra attr_size field of a compressed_header */ | |
542 | data = kheap_alloc(KHEAP_TEMP, alloc_size, Z_WAITOK); | |
543 | if (!data) { | |
544 | err = ENOMEM; | |
545 | goto out; | |
546 | } | |
547 | ||
548 | /* read the xattr into our buffer, skipping over the attr_size field at the beginning */ | |
549 | attr_uio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf)); | |
550 | uio_addiov(attr_uio, CAST_USER_ADDR_T(data + sizeof(hdr->attr_size)), attr_size); | |
551 | ||
552 | err = vn_getxattr(vp, DECMPFS_XATTR_NAME, attr_uio, &read_size, XATTR_NOSECURITY, decmpfs_ctx); | |
553 | if (err != 0) { | |
554 | goto out; | |
555 | } | |
556 | if (read_size != attr_size) { | |
557 | err = EINVAL; | |
558 | goto out; | |
559 | } | |
560 | hdr = (decmpfs_header*)data; | |
561 | hdr->attr_size = (uint32_t)attr_size; | |
562 | /* swap the fields to native endian */ | |
563 | hdr->compression_magic = OSSwapLittleToHostInt32(hdr->compression_magic); | |
564 | hdr->compression_type = OSSwapLittleToHostInt32(hdr->compression_type); | |
565 | hdr->uncompressed_size = OSSwapLittleToHostInt64(hdr->uncompressed_size); | |
566 | } | |
567 | ||
568 | if (hdr->compression_magic != DECMPFS_MAGIC) { | |
569 | ErrorLogWithPath("invalid compression_magic 0x%08x, should be 0x%08x\n", hdr->compression_magic, DECMPFS_MAGIC); | |
570 | err = EINVAL; | |
571 | goto out; | |
572 | } | |
573 | ||
574 | /* | |
575 | * Special-case the DATALESS compressor here; that is a valid type, | |
576 | 	 * even though there will never be an entry in the decompressor | |
577 | * handler table for it. If we don't do this, then the cmp_state | |
578 | * for this cnode will end up being marked NOT_COMPRESSED, and | |
579 | * we'll be stuck in limbo. | |
580 | */ | |
581 | if (hdr->compression_type >= CMP_MAX && !decmpfs_type_is_dataless(hdr->compression_type)) { | |
582 | if (returnInvalid) { | |
583 | /* return the header even though the type is out of range */ | |
584 | err = ERANGE; | |
585 | } else { | |
586 | ErrorLogWithPath("compression_type %d out of range\n", hdr->compression_type); | |
587 | err = EINVAL; | |
588 | } | |
589 | goto out; | |
590 | } | |
591 | ||
592 | out: | |
593 | if (err && (err != ERANGE)) { | |
594 | DebugLogWithPath("err %d\n", err); | |
595 | kheap_free(KHEAP_TEMP, data, alloc_size); | |
596 | *hdrOut = NULL; | |
597 | } else { | |
598 | *hdrOut = hdr; | |
599 | *hdr_size = alloc_size; | |
600 | } | |
601 | /* | |
602 | * Trace the following parameters on return with event-id 0x03120004. | |
603 | * | |
604 | * @vp->v_id: vnode-id for which to fetch compressed header. | |
605 | * @err: value returned from this function. | |
606 | */ | |
607 | DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FETCH_COMPRESSED_HEADER, vp->v_id, err); | |
608 | return err; | |
609 | } | |
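/*
 * Editor's sketch (not original source): the usual calling pattern for
 * decmpfs_fetch_compressed_header().  The caller owns the returned buffer
 * and must free it with the alloc_size reported back by the fetch, exactly
 * as the real callers below do.
 */
#if 0
static int
example_read_uncompressed_size(vnode_t vp, decmpfs_cnode *cp, uint64_t *size_out)
{
	decmpfs_header *hdr = NULL;
	size_t alloc_size = 0;
	int err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0, &alloc_size);

	if (err == 0) {
		*size_out = hdr->uncompressed_size;
	}
	if (hdr != NULL) {
		kheap_free(KHEAP_TEMP, hdr, alloc_size);
	}
	return err;
}
#endif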
610 | ||
611 | static int | |
612 | decmpfs_fast_get_state(decmpfs_cnode *cp) | |
613 | { | |
614 | /* | |
615 | * return the cached state | |
616 | * this should *only* be called when we know that decmpfs_file_is_compressed has already been called, | |
617 | * because this implies that the cached state is valid | |
618 | */ | |
619 | int cmp_state = decmpfs_cnode_get_vnode_state(cp); | |
620 | ||
621 | switch (cmp_state) { | |
622 | case FILE_IS_NOT_COMPRESSED: | |
623 | case FILE_IS_COMPRESSED: | |
624 | case FILE_IS_CONVERTING: | |
625 | return cmp_state; | |
626 | case FILE_TYPE_UNKNOWN: | |
627 | /* | |
628 | * we should only get here if decmpfs_file_is_compressed was not called earlier on this vnode, | |
629 | * which should not be possible | |
630 | */ | |
631 | ErrorLog("decmpfs_fast_get_state called on unknown file\n"); | |
632 | return FILE_IS_NOT_COMPRESSED; | |
633 | default: | |
634 | /* */ | |
635 | ErrorLog("unknown cmp_state %d\n", cmp_state); | |
636 | return FILE_IS_NOT_COMPRESSED; | |
637 | } | |
638 | } | |
639 | ||
640 | static int | |
641 | decmpfs_fast_file_is_compressed(decmpfs_cnode *cp) | |
642 | { | |
643 | int cmp_state = decmpfs_cnode_get_vnode_state(cp); | |
644 | ||
645 | switch (cmp_state) { | |
646 | case FILE_IS_NOT_COMPRESSED: | |
647 | return 0; | |
648 | case FILE_IS_COMPRESSED: | |
649 | case FILE_IS_CONVERTING: | |
650 | return 1; | |
651 | case FILE_TYPE_UNKNOWN: | |
652 | /* | |
653 | * we should only get here if decmpfs_file_is_compressed was not called earlier on this vnode, | |
654 | * which should not be possible | |
655 | */ | |
656 | 		ErrorLog("decmpfs_fast_file_is_compressed called on unknown file\n"); | |
657 | return 0; | |
658 | default: | |
659 | /* */ | |
660 | ErrorLog("unknown cmp_state %d\n", cmp_state); | |
661 | return 0; | |
662 | } | |
663 | } | |
664 | ||
665 | errno_t | |
666 | decmpfs_validate_compressed_file(vnode_t vp, decmpfs_cnode *cp) | |
667 | { | |
668 | /* give a compressor a chance to indicate that a compressed file is invalid */ | |
669 | decmpfs_header *hdr = NULL; | |
670 | size_t alloc_size = 0; | |
671 | errno_t err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0, &alloc_size); | |
672 | ||
673 | if (err) { | |
674 | /* we couldn't get the header */ | |
675 | if (decmpfs_fast_get_state(cp) == FILE_IS_NOT_COMPRESSED) { | |
676 | /* the file is no longer compressed, so return success */ | |
677 | err = 0; | |
678 | } | |
679 | goto out; | |
680 | } | |
681 | ||
682 | if (!decmpfs_type_is_dataless(hdr->compression_type)) { | |
683 | lck_rw_lock_shared(&decompressorsLock); | |
684 | decmpfs_validate_compressed_file_func validate = decmp_get_func(vp, hdr->compression_type, validate); | |
685 | if (validate) { /* make sure this validation function is valid */ | |
686 | /* is the data okay? */ | |
687 | err = validate(vp, decmpfs_ctx, hdr); | |
688 | } else if (decmp_get_func(vp, hdr->compression_type, fetch) == NULL) { | |
689 | /* the type isn't registered */ | |
690 | err = EIO; | |
691 | } else { | |
692 | /* no validate registered, so nothing to do */ | |
693 | err = 0; | |
694 | } | |
695 | lck_rw_unlock_shared(&decompressorsLock); | |
696 | } | |
697 | out: | |
698 | if (hdr != NULL) { | |
699 | kheap_free(KHEAP_TEMP, hdr, alloc_size); | |
700 | } | |
701 | #if COMPRESSION_DEBUG | |
702 | if (err) { | |
703 | DebugLogWithPath("decmpfs_validate_compressed_file ret %d, vp->v_flag %d\n", err, vp->v_flag); | |
704 | } | |
705 | #endif | |
706 | return err; | |
707 | } | |
708 | ||
709 | int | |
710 | decmpfs_file_is_compressed(vnode_t vp, decmpfs_cnode *cp) | |
711 | { | |
712 | /* | |
713 | * determines whether vp points to a compressed file | |
714 | * | |
715 | * to speed up this operation, we cache the result in the cnode, and do as little as possible | |
716 | * in the case where the cnode already has a valid cached state | |
717 | * | |
718 | */ | |
719 | ||
720 | int ret = 0; | |
721 | int error = 0; | |
722 | uint32_t cmp_state; | |
723 | struct vnode_attr va_fetch; | |
724 | decmpfs_header *hdr = NULL; | |
725 | size_t alloc_size = 0; | |
726 | mount_t mp = NULL; | |
727 | int cnode_locked = 0; | |
728 | int saveInvalid = 0; // save the header data even though the type was out of range | |
729 | uint64_t decompression_flags = 0; | |
730 | bool is_mounted, is_local_fs; | |
731 | ||
732 | if (vnode_isnamedstream(vp)) { | |
733 | /* | |
734 | * named streams can't be compressed | |
735 | * since named streams of the same file share the same cnode, | |
736 | * we don't want to get/set the state in the cnode, just return 0 | |
737 | */ | |
738 | return 0; | |
739 | } | |
740 | ||
741 | 	/* examine the cached state in this cnode */ | |
742 | cmp_state = decmpfs_cnode_get_vnode_state(cp); | |
743 | switch (cmp_state) { | |
744 | case FILE_IS_NOT_COMPRESSED: | |
745 | return 0; | |
746 | case FILE_IS_COMPRESSED: | |
747 | return 1; | |
748 | case FILE_IS_CONVERTING: | |
749 | /* treat the file as compressed, because this gives us a way to block future reads until decompression is done */ | |
750 | return 1; | |
751 | case FILE_TYPE_UNKNOWN: | |
752 | /* the first time we encountered this vnode, so we need to check it out */ | |
753 | break; | |
754 | default: | |
755 | /* unknown state, assume file is not compressed */ | |
756 | ErrorLogWithPath("unknown cmp_state %d\n", cmp_state); | |
757 | return 0; | |
758 | } | |
759 | ||
760 | is_mounted = false; | |
761 | is_local_fs = false; | |
762 | mp = vnode_mount(vp); | |
763 | if (mp) { | |
764 | is_mounted = true; | |
765 | } | |
766 | if (is_mounted) { | |
767 | is_local_fs = ((mp->mnt_flag & MNT_LOCAL)); | |
768 | } | |
769 | /* | |
770 | * Trace the following parameters on entry with event-id 0x03120014. | |
771 | * | |
772 | * @vp->v_id: vnode-id of the file being queried. | |
773 | * @is_mounted: set to true if @vp belongs to a mounted fs. | |
774 | 	 * @is_local_fs: set to true if @vp belongs to a local fs. | |
775 | */ | |
776 | DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, | |
777 | is_mounted, is_local_fs); | |
778 | ||
779 | if (!is_mounted) { | |
780 | /* | |
781 | * this should only be true before we mount the root filesystem | |
782 | * we short-cut this return to avoid the call to getattr below, which | |
783 | * will fail before root is mounted | |
784 | */ | |
785 | ret = FILE_IS_NOT_COMPRESSED; | |
786 | goto done; | |
787 | } | |
788 | ||
789 | if (!is_local_fs) { | |
790 | /* compression only supported on local filesystems */ | |
791 | ret = FILE_IS_NOT_COMPRESSED; | |
792 | goto done; | |
793 | } | |
794 | ||
795 | /* lock our cnode data so that another caller doesn't change the state under us */ | |
796 | decmpfs_lock_compressed_data(cp, 1); | |
797 | cnode_locked = 1; | |
798 | ||
799 | VATTR_INIT(&va_fetch); | |
800 | VATTR_WANTED(&va_fetch, va_flags); | |
801 | error = vnode_getattr(vp, &va_fetch, decmpfs_ctx); | |
802 | if (error) { | |
803 | /* failed to get the bsd flags so the file is not compressed */ | |
804 | ret = FILE_IS_NOT_COMPRESSED; | |
805 | goto done; | |
806 | } | |
807 | if (va_fetch.va_flags & UF_COMPRESSED) { | |
808 | /* UF_COMPRESSED is on, make sure the file has the DECMPFS_XATTR_NAME xattr */ | |
809 | error = decmpfs_fetch_compressed_header(vp, cp, &hdr, 1, &alloc_size); | |
810 | if ((hdr != NULL) && (error == ERANGE)) { | |
811 | saveInvalid = 1; | |
812 | } | |
813 | if (error) { | |
814 | /* failed to get the xattr so the file is not compressed */ | |
815 | ret = FILE_IS_NOT_COMPRESSED; | |
816 | goto done; | |
817 | } | |
818 | /* | |
819 | * We got the xattr, so the file is at least tagged compressed. | |
820 | * For DATALESS, regular files and directories can be "compressed". | |
821 | * For all other types, only files are allowed. | |
822 | */ | |
823 | if (!vnode_isreg(vp) && | |
824 | !(decmpfs_type_is_dataless(hdr->compression_type) && vnode_isdir(vp))) { | |
825 | ret = FILE_IS_NOT_COMPRESSED; | |
826 | goto done; | |
827 | } | |
828 | ret = FILE_IS_COMPRESSED; | |
829 | goto done; | |
830 | } | |
831 | /* UF_COMPRESSED isn't on, so the file isn't compressed */ | |
832 | ret = FILE_IS_NOT_COMPRESSED; | |
833 | ||
834 | done: | |
835 | if (((ret == FILE_IS_COMPRESSED) || saveInvalid) && hdr) { | |
836 | /* | |
837 | * cache the uncompressed size away in the cnode | |
838 | */ | |
839 | ||
840 | if (!cnode_locked) { | |
841 | /* | |
842 | * we should never get here since the only place ret is set to FILE_IS_COMPRESSED | |
843 | * is after the call to decmpfs_lock_compressed_data above | |
844 | */ | |
845 | decmpfs_lock_compressed_data(cp, 1); | |
846 | cnode_locked = 1; | |
847 | } | |
848 | ||
849 | if (vnode_isdir(vp)) { | |
850 | decmpfs_cnode_set_vnode_cached_size(cp, 64); | |
851 | decmpfs_cnode_set_vnode_cached_nchildren(cp, decmpfs_get_directory_entries(hdr)); | |
852 | if (hdr->compression_type == DATALESS_PKG_CMPFS_TYPE) { | |
853 | decmpfs_cnode_set_vnode_cached_total_size(cp, DECMPFS_PKG_SIZE(hdr->_size)); | |
854 | } | |
855 | } else { | |
856 | decmpfs_cnode_set_vnode_cached_size(cp, hdr->uncompressed_size); | |
857 | } | |
858 | decmpfs_cnode_set_vnode_state(cp, ret, 1); | |
859 | decmpfs_cnode_set_vnode_cmp_type(cp, hdr->compression_type, 1); | |
860 | /* remember if the xattr's size was equal to the minimal xattr */ | |
861 | if (hdr->attr_size == sizeof(decmpfs_disk_header)) { | |
862 | decmpfs_cnode_set_vnode_minimal_xattr(cp, 1, 1); | |
863 | } | |
864 | if (ret == FILE_IS_COMPRESSED) { | |
865 | /* update the ubc's size for this file */ | |
866 | ubc_setsize(vp, hdr->uncompressed_size); | |
867 | ||
868 | /* update the decompression flags in the decmpfs cnode */ | |
869 | lck_rw_lock_shared(&decompressorsLock); | |
870 | decmpfs_get_decompression_flags_func get_flags = decmp_get_func(vp, hdr->compression_type, get_flags); | |
871 | if (get_flags) { | |
872 | decompression_flags = get_flags(vp, decmpfs_ctx, hdr); | |
873 | } | |
874 | lck_rw_unlock_shared(&decompressorsLock); | |
875 | decmpfs_cnode_set_decompression_flags(cp, decompression_flags); | |
876 | } | |
877 | } else { | |
878 | /* we might have already taken the lock above; if so, skip taking it again by passing cnode_locked as the skiplock parameter */ | |
879 | decmpfs_cnode_set_vnode_state(cp, ret, cnode_locked); | |
880 | } | |
881 | ||
882 | if (cnode_locked) { | |
883 | decmpfs_unlock_compressed_data(cp, 1); | |
884 | } | |
885 | ||
886 | if (hdr != NULL) { | |
887 | kheap_free(KHEAP_TEMP, hdr, alloc_size); | |
888 | } | |
889 | ||
890 | /* | |
891 | * Trace the following parameters on return with event-id 0x03120014. | |
892 | * | |
893 | * @vp->v_id: vnode-id of the file being queried. | |
894 | 	 * @return: set to 1 if the file is compressed. | |
895 | */ | |
896 | switch (ret) { | |
897 | case FILE_IS_NOT_COMPRESSED: | |
898 | DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 0); | |
899 | return 0; | |
900 | case FILE_IS_COMPRESSED: | |
901 | case FILE_IS_CONVERTING: | |
902 | DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 1); | |
903 | return 1; | |
904 | default: | |
905 | /* unknown state, assume file is not compressed */ | |
906 | DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 0); | |
907 | ErrorLogWithPath("unknown ret %d\n", ret); | |
908 | return 0; | |
909 | } | |
910 | } | |
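/*
 * Editor's sketch (not original source): how a client filesystem's read vnop
 * typically consults the state machine above.  "example_get_decmp_cnode" is
 * a hypothetical accessor for the per-file decmpfs_cnode kept by the
 * filesystem.
 */
#if 0
decmpfs_cnode *example_get_decmp_cnode(vnode_t vp);   /* hypothetical accessor */

static int
example_vnop_read(struct vnop_read_args *ap)
{
	decmpfs_cnode *dp = example_get_decmp_cnode(ap->a_vp);

	if (decmpfs_file_is_compressed(ap->a_vp, dp)) {
		int is_compressed = 1;
		int err = decmpfs_read_compressed(ap, &is_compressed, dp);
		if (is_compressed) {
			/* decmpfs serviced (or failed) the read */
			return err;
		}
		/* the file was decompressed underneath us; fall back to normal I/O */
	}
	/* ... regular uncompressed read path ... */
	return 0;
}
#endif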
911 | ||
912 | int | |
913 | decmpfs_update_attributes(vnode_t vp, struct vnode_attr *vap) | |
914 | { | |
915 | int error = 0; | |
916 | ||
917 | if (VATTR_IS_ACTIVE(vap, va_flags)) { | |
918 | /* the BSD flags are being updated */ | |
919 | if (vap->va_flags & UF_COMPRESSED) { | |
920 | /* the compressed bit is being set, did it change? */ | |
921 | struct vnode_attr va_fetch; | |
922 | int old_flags = 0; | |
923 | VATTR_INIT(&va_fetch); | |
924 | VATTR_WANTED(&va_fetch, va_flags); | |
925 | error = vnode_getattr(vp, &va_fetch, decmpfs_ctx); | |
926 | if (error) { | |
927 | return error; | |
928 | } | |
929 | ||
930 | old_flags = va_fetch.va_flags; | |
931 | ||
932 | if (!(old_flags & UF_COMPRESSED)) { | |
933 | /* | |
934 | * Compression bit was turned on, make sure the file has the DECMPFS_XATTR_NAME attribute. | |
935 | * This precludes anyone from using the UF_COMPRESSED bit for anything else, and it enforces | |
936 | * an order of operation -- you must first do the setxattr and then the chflags. | |
937 | */ | |
938 | ||
939 | if (VATTR_IS_ACTIVE(vap, va_data_size)) { | |
940 | /* | |
941 | * don't allow the caller to set the BSD flag and the size in the same call | |
942 | * since this doesn't really make sense | |
943 | */ | |
944 | vap->va_flags &= ~UF_COMPRESSED; | |
945 | return 0; | |
946 | } | |
947 | ||
948 | decmpfs_header *hdr = NULL; | |
949 | size_t alloc_size = 0; | |
950 | error = decmpfs_fetch_compressed_header(vp, NULL, &hdr, 1, &alloc_size); | |
951 | if (error == 0) { | |
952 | /* | |
953 | * Allow the flag to be set since the decmpfs attribute | |
954 | * is present. | |
955 | * | |
956 | * If we're creating a dataless file we do not want to | |
957 | * truncate it to zero which allows the file resolver to | |
958 | * have more control over when truncation should happen. | |
959 | * All other types of compressed files are truncated to | |
960 | * zero. | |
961 | */ | |
962 | if (!decmpfs_type_is_dataless(hdr->compression_type)) { | |
963 | VATTR_SET_ACTIVE(vap, va_data_size); | |
964 | vap->va_data_size = 0; | |
965 | } | |
966 | } else if (error == ERANGE) { | |
967 | /* the file had a decmpfs attribute but the type was out of range, so don't muck with the file's data size */ | |
968 | } else { | |
969 | /* no DECMPFS_XATTR_NAME attribute, so deny the update */ | |
970 | vap->va_flags &= ~UF_COMPRESSED; | |
971 | } | |
972 | if (hdr != NULL) { | |
973 | kheap_free(KHEAP_TEMP, hdr, alloc_size); | |
974 | } | |
975 | } | |
976 | } | |
977 | } | |
978 | ||
979 | return 0; | |
980 | } | |
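/*
 * Editor's sketch (not original source; userspace code shown only to
 * illustrate the ordering enforced above): UF_COMPRESSED may only be set
 * once the com.apple.decmpfs xattr is already present, so a tool that
 * compresses a file writes the xattr first and flips the BSD flag second.
 * (Userspace would include <sys/stat.h>, <sys/xattr.h> and <unistd.h>.)
 */
#if 0
int
example_mark_compressed(const char *path, const void *xattr, size_t xattr_len)
{
	struct stat st;

	if (stat(path, &st) != 0) {
		return -1;
	}
	if (setxattr(path, "com.apple.decmpfs", xattr, xattr_len, 0, XATTR_NOFOLLOW) != 0) {
		return -1;
	}
	/* chflags replaces the flag word, so preserve the existing flags */
	return chflags(path, st.st_flags | UF_COMPRESSED);
}
#endif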
981 | ||
982 | static int | |
983 | wait_for_decompress(decmpfs_cnode *cp) | |
984 | { | |
985 | int state; | |
986 | lck_mtx_lock(&decompress_channel_mtx); | |
987 | do { | |
988 | state = decmpfs_fast_get_state(cp); | |
989 | if (state != FILE_IS_CONVERTING) { | |
990 | /* file is not decompressing */ | |
991 | lck_mtx_unlock(&decompress_channel_mtx); | |
992 | return state; | |
993 | } | |
994 | msleep((caddr_t)&decompress_channel, &decompress_channel_mtx, PINOD, "wait_for_decompress", NULL); | |
995 | } while (1); | |
996 | } | |
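/*
 * Editor's sketch (not original source): the wakeup side of the wait above.
 * When decmpfs_decompress_file() finishes converting, it moves the cnode out
 * of FILE_IS_CONVERTING and wakes every thread sleeping on
 * decompress_channel; the exact bookkeeping lives in
 * decmpfs_decompress_file(), and this only shows the sleep/wakeup pairing.
 */
#if 0
static void
example_finish_conversion(decmpfs_cnode *cp, uint32_t new_state)
{
	decmpfs_cnode_set_vnode_state(cp, new_state, 0);
	lck_mtx_lock(&decompress_channel_mtx);
	wakeup((caddr_t)&decompress_channel);
	lck_mtx_unlock(&decompress_channel_mtx);
}
#endif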
997 | ||
998 | #pragma mark --- decmpfs hide query routines --- | |
999 | ||
1000 | int | |
1001 | decmpfs_hides_rsrc(vfs_context_t ctx, decmpfs_cnode *cp) | |
1002 | { | |
1003 | /* | |
1004 | * WARNING!!! | |
1005 | * callers may (and do) pass NULL for ctx, so we should only use it | |
1006 | * for this equality comparison | |
1007 | * | |
1008 | * This routine should only be called after a file has already been through decmpfs_file_is_compressed | |
1009 | */ | |
1010 | ||
1011 | if (ctx == decmpfs_ctx) { | |
1012 | return 0; | |
1013 | } | |
1014 | ||
1015 | if (!decmpfs_fast_file_is_compressed(cp)) { | |
1016 | return 0; | |
1017 | } | |
1018 | ||
1019 | /* all compressed files hide their resource fork */ | |
1020 | return 1; | |
1021 | } | |
1022 | ||
1023 | int | |
1024 | decmpfs_hides_xattr(vfs_context_t ctx, decmpfs_cnode *cp, const char *xattr) | |
1025 | { | |
1026 | /* | |
1027 | * WARNING!!! | |
1028 | * callers may (and do) pass NULL for ctx, so we should only use it | |
1029 | * for this equality comparison | |
1030 | * | |
1031 | * This routine should only be called after a file has already been through decmpfs_file_is_compressed | |
1032 | */ | |
1033 | ||
1034 | if (ctx == decmpfs_ctx) { | |
1035 | return 0; | |
1036 | } | |
1037 | if (strncmp(xattr, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME) - 1) == 0) { | |
1038 | return decmpfs_hides_rsrc(ctx, cp); | |
1039 | } | |
1040 | if (!decmpfs_fast_file_is_compressed(cp)) { | |
1041 | /* file is not compressed, so don't hide this xattr */ | |
1042 | return 0; | |
1043 | } | |
1044 | if (strncmp(xattr, DECMPFS_XATTR_NAME, sizeof(DECMPFS_XATTR_NAME) - 1) == 0) { | |
1045 | /* it's our xattr, so hide it */ | |
1046 | return 1; | |
1047 | } | |
1048 | /* don't hide this xattr */ | |
1049 | return 0; | |
1050 | } | |
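/*
 * Editor's sketch (not original source): a client filesystem typically uses
 * the predicate above to hide decmpfs bookkeeping from userspace, for
 * example in its getxattr vnop.
 */
#if 0
static int
example_vnop_getxattr(vnode_t vp, const char *name, vfs_context_t ctx,
    decmpfs_cnode *dp)
{
	if (decmpfs_hides_xattr(ctx, dp, name)) {
		/* pretend com.apple.decmpfs (and the resource fork) aren't there */
		return ENOATTR;
	}
	/* ... normal xattr lookup ... */
	return 0;
}
#endif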
1051 | ||
1052 | #pragma mark --- registration/validation routines --- | |
1053 | ||
1054 | static inline int | |
1055 | registration_valid(const decmpfs_registration *registration) | |
1056 | { | |
1057 | return registration && ((registration->decmpfs_registration == DECMPFS_REGISTRATION_VERSION_V1) || (registration->decmpfs_registration == DECMPFS_REGISTRATION_VERSION_V3)); | |
1058 | } | |
1059 | ||
1060 | errno_t | |
1061 | register_decmpfs_decompressor(uint32_t compression_type, const decmpfs_registration *registration) | |
1062 | { | |
1063 | /* called by kexts to register decompressors */ | |
1064 | ||
1065 | errno_t ret = 0; | |
1066 | int locked = 0; | |
1067 | char resourceName[80]; | |
1068 | ||
1069 | if ((compression_type >= CMP_MAX) || !registration_valid(registration)) { | |
1070 | ret = EINVAL; | |
1071 | goto out; | |
1072 | } | |
1073 | ||
1074 | lck_rw_lock_exclusive(&decompressorsLock); locked = 1; | |
1075 | ||
1076 | /* make sure the registration for this type is zero */ | |
1077 | if (decompressors[compression_type] != NULL) { | |
1078 | ret = EEXIST; | |
1079 | goto out; | |
1080 | } | |
1081 | decompressors[compression_type] = registration; | |
1082 | snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", compression_type); | |
1083 | IOServicePublishResource(resourceName, TRUE); | |
1084 | ||
1085 | out: | |
1086 | if (locked) { | |
1087 | lck_rw_unlock_exclusive(&decompressorsLock); | |
1088 | } | |
1089 | return ret; | |
1090 | } | |
1091 | ||
1092 | errno_t | |
1093 | unregister_decmpfs_decompressor(uint32_t compression_type, decmpfs_registration *registration) | |
1094 | { | |
1095 | /* called by kexts to unregister decompressors */ | |
1096 | ||
1097 | errno_t ret = 0; | |
1098 | int locked = 0; | |
1099 | char resourceName[80]; | |
1100 | ||
1101 | if ((compression_type >= CMP_MAX) || !registration_valid(registration)) { | |
1102 | ret = EINVAL; | |
1103 | goto out; | |
1104 | } | |
1105 | ||
1106 | lck_rw_lock_exclusive(&decompressorsLock); locked = 1; | |
1107 | if (decompressors[compression_type] != registration) { | |
1108 | ret = EEXIST; | |
1109 | goto out; | |
1110 | } | |
1111 | decompressors[compression_type] = NULL; | |
1112 | snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", compression_type); | |
1113 | IOServicePublishResource(resourceName, FALSE); | |
1114 | ||
1115 | out: | |
1116 | if (locked) { | |
1117 | lck_rw_unlock_exclusive(&decompressorsLock); | |
1118 | } | |
1119 | return ret; | |
1120 | } | |
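/*
 * Editor's sketch (not original source): the rough shape of a kext-side
 * registration against the API above.  "example_fetch" and "example_validate"
 * are hypothetical callbacks whose signatures are inferred from the call
 * sites in this file, and 200 is a placeholder compression type chosen only
 * for this sketch.
 */
#if 0
static errno_t example_validate(vnode_t vp, vfs_context_t ctx, decmpfs_header *hdr);
static int     example_fetch(vnode_t vp, vfs_context_t ctx, decmpfs_header *hdr,
    off_t offset, user_ssize_t size, int nvec, decmpfs_vector *vec,
    uint64_t *bytes_read);

static const decmpfs_registration example_reg = {
	.decmpfs_registration = DECMPFS_REGISTRATION_VERSION_V3,
	.validate             = example_validate,
	.fetch                = example_fetch,
	/* free_data / get_flags omitted: the lookups above tolerate NULL entries */
};

static void
example_kext_start(void)
{
	errno_t err = register_decmpfs_decompressor(200, &example_reg);
	if (err != 0) {
		printf("example decompressor registration failed: %d\n", err);
	}
}
#endif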
1121 | ||
1122 | static int | |
1123 | compression_type_valid(vnode_t vp, decmpfs_header *hdr) | |
1124 | { | |
1125 | /* fast pre-check to determine if the given compressor has checked in */ | |
1126 | int ret = 0; | |
1127 | ||
1128 | /* every compressor must have at least a fetch function */ | |
1129 | lck_rw_lock_shared(&decompressorsLock); | |
1130 | if (decmp_get_func(vp, hdr->compression_type, fetch) != NULL) { | |
1131 | ret = 1; | |
1132 | } | |
1133 | lck_rw_unlock_shared(&decompressorsLock); | |
1134 | ||
1135 | return ret; | |
1136 | } | |
1137 | ||
1138 | #pragma mark --- compression/decompression routines --- | |
1139 | ||
1140 | static int | |
1141 | decmpfs_fetch_uncompressed_data(vnode_t vp, decmpfs_cnode *cp, decmpfs_header *hdr, off_t offset, user_ssize_t size, int nvec, decmpfs_vector *vec, uint64_t *bytes_read) | |
1142 | { | |
1143 | /* get the uncompressed bytes for the specified region of vp by calling out to the registered compressor */ | |
1144 | ||
1145 | int err = 0; | |
1146 | ||
1147 | *bytes_read = 0; | |
1148 | ||
1149 | if (offset >= (off_t)hdr->uncompressed_size) { | |
1150 | /* reading past end of file; nothing to do */ | |
1151 | err = 0; | |
1152 | goto out; | |
1153 | } | |
1154 | if (offset < 0) { | |
1155 | /* tried to read from before start of file */ | |
1156 | err = EINVAL; | |
1157 | goto out; | |
1158 | } | |
1159 | if (hdr->uncompressed_size - offset < size) { | |
1160 | /* adjust size so we don't read past the end of the file */ | |
1161 | size = (user_ssize_t)(hdr->uncompressed_size - offset); | |
1162 | } | |
1163 | if (size == 0) { | |
1164 | /* nothing to read */ | |
1165 | err = 0; | |
1166 | goto out; | |
1167 | } | |
1168 | ||
1169 | /* | |
1170 | * Trace the following parameters on entry with event-id 0x03120008. | |
1171 | * | |
1172 | * @vp->v_id: vnode-id of the file being decompressed. | |
1173 | * @hdr->compression_type: compression type. | |
1174 | * @offset: offset from where to fetch uncompressed data. | |
1175 | * @size: amount of uncompressed data to fetch. | |
1176 | * | |
1177 | * Please NOTE: @offset and @size can overflow in theory but | |
1178 | * here it is safe. | |
1179 | */ | |
1180 | DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FETCH_UNCOMPRESSED_DATA, vp->v_id, | |
1181 | hdr->compression_type, (int)offset, (int)size); | |
1182 | lck_rw_lock_shared(&decompressorsLock); | |
1183 | decmpfs_fetch_uncompressed_data_func fetch = decmp_get_func(vp, hdr->compression_type, fetch); | |
1184 | if (fetch) { | |
1185 | err = fetch(vp, decmpfs_ctx, hdr, offset, size, nvec, vec, bytes_read); | |
1186 | lck_rw_unlock_shared(&decompressorsLock); | |
1187 | if (err == 0) { | |
1188 | uint64_t decompression_flags = decmpfs_cnode_get_decompression_flags(cp); | |
1189 | if (decompression_flags & DECMPFS_FLAGS_FORCE_FLUSH_ON_DECOMPRESS) { | |
1190 | #if !defined(__i386__) && !defined(__x86_64__) | |
1191 | int i; | |
1192 | for (i = 0; i < nvec; i++) { | |
1193 | assert(vec[i].size >= 0 && vec[i].size <= UINT_MAX); | |
1194 | flush_dcache64((addr64_t)(uintptr_t)vec[i].buf, (unsigned int)vec[i].size, FALSE); | |
1195 | } | |
1196 | #endif | |
1197 | } | |
1198 | } | |
1199 | } else { | |
1200 | err = ENOTSUP; | |
1201 | lck_rw_unlock_shared(&decompressorsLock); | |
1202 | } | |
1203 | /* | |
1204 | * Trace the following parameters on return with event-id 0x03120008. | |
1205 | * | |
1206 | * @vp->v_id: vnode-id of the file being decompressed. | |
1207 | * @bytes_read: amount of uncompressed bytes fetched in bytes. | |
1208 | * @err: value returned from this function. | |
1209 | * | |
1210 | * Please NOTE: @bytes_read can overflow in theory but here it is safe. | |
1211 | */ | |
1212 | DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FETCH_UNCOMPRESSED_DATA, vp->v_id, | |
1213 | (int)*bytes_read, err); | |
1214 | out: | |
1215 | return err; | |
1216 | } | |
1217 | ||
1218 | static kern_return_t | |
1219 | commit_upl(upl_t upl, upl_offset_t pl_offset, size_t uplSize, int flags, int abort) | |
1220 | { | |
1221 | kern_return_t kr = 0; | |
1222 | ||
1223 | #if CONFIG_IOSCHED | |
1224 | upl_unmark_decmp(upl); | |
1225 | #endif /* CONFIG_IOSCHED */ | |
1226 | ||
1227 | /* commit the upl pages */ | |
1228 | if (abort) { | |
1229 | VerboseLog("aborting upl, flags 0x%08x\n", flags); | |
1230 | kr = ubc_upl_abort_range(upl, pl_offset, (upl_size_t)uplSize, flags); | |
1231 | if (kr != KERN_SUCCESS) { | |
1232 | ErrorLog("ubc_upl_abort_range error %d\n", (int)kr); | |
1233 | } | |
1234 | } else { | |
1235 | VerboseLog("committing upl, flags 0x%08x\n", flags | UPL_COMMIT_CLEAR_DIRTY); | |
1236 | kr = ubc_upl_commit_range(upl, pl_offset, (upl_size_t)uplSize, flags | UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_WRITTEN_BY_KERNEL); | |
1237 | if (kr != KERN_SUCCESS) { | |
1238 | ErrorLog("ubc_upl_commit_range error %d\n", (int)kr); | |
1239 | } | |
1240 | } | |
1241 | return kr; | |
1242 | } | |
1243 | ||
1244 | ||
1245 | errno_t | |
1246 | decmpfs_pagein_compressed(struct vnop_pagein_args *ap, int *is_compressed, decmpfs_cnode *cp) | |
1247 | { | |
1248 | /* handles a page-in request from vfs for a compressed file */ | |
1249 | ||
1250 | int err = 0; | |
1251 | vnode_t vp = ap->a_vp; | |
1252 | upl_t pl = ap->a_pl; | |
1253 | upl_offset_t pl_offset = ap->a_pl_offset; | |
1254 | off_t f_offset = ap->a_f_offset; | |
1255 | size_t size = ap->a_size; | |
1256 | int flags = ap->a_flags; | |
1257 | off_t uplPos = 0; | |
1258 | user_ssize_t uplSize = 0; | |
1259 | size_t verify_block_size = 0; | |
1260 | void *data = NULL; | |
1261 | decmpfs_header *hdr = NULL; | |
1262 | size_t alloc_size = 0; | |
1263 | uint64_t cachedSize = 0; | |
1264 | int cmpdata_locked = 0; | |
1265 | bool file_tail_page_valid = false; | |
1266 | int num_valid_pages = 0; | |
1267 | int num_invalid_pages = 0; | |
1268 | ||
1269 | if (!decmpfs_trylock_compressed_data(cp, 0)) { | |
1270 | return EAGAIN; | |
1271 | } | |
1272 | cmpdata_locked = 1; | |
1273 | ||
1274 | ||
1275 | if (flags & ~(UPL_IOSYNC | UPL_NOCOMMIT | UPL_NORDAHEAD)) { | |
1276 | DebugLogWithPath("pagein: unknown flags 0x%08x\n", (flags & ~(UPL_IOSYNC | UPL_NOCOMMIT | UPL_NORDAHEAD))); | |
1277 | } | |
1278 | ||
1279 | err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0, &alloc_size); | |
1280 | if (err != 0) { | |
1281 | goto out; | |
1282 | } | |
1283 | ||
1284 | cachedSize = hdr->uncompressed_size; | |
1285 | ||
1286 | if (!compression_type_valid(vp, hdr)) { | |
1287 | /* compressor not registered */ | |
1288 | err = ENOTSUP; | |
1289 | goto out; | |
1290 | } | |
1291 | ||
1292 | /* | |
1293 | * If the verify block size is larger than the page size, the UPL needs | |
1294 | 	 * to be aligned to it. Since the UPL has been created by the filesystem, | |
1295 | 	 * we will only check whether the passed-in UPL length conforms to the | |
1296 | * alignment requirements. | |
1297 | */ | |
1298 | err = VNOP_VERIFY(vp, f_offset, NULL, 0, &verify_block_size, | |
1299 | VNODE_VERIFY_DEFAULT, NULL); | |
1300 | if (err) { | |
1301 | goto out; | |
1302 | } else if (verify_block_size) { | |
1303 | if (verify_block_size & (verify_block_size - 1)) { | |
1304 | 			ErrorLogWithPath("verify block size is not a power of 2, no verification will be done\n"); | |
1305 | err = EINVAL; | |
1306 | } else if (size % verify_block_size) { | |
1307 | ErrorLogWithPath("upl size is not a multiple of verify block size\n"); | |
1308 | err = EINVAL; | |
1309 | } | |
1310 | if (err) { | |
1311 | goto out; | |
1312 | } | |
1313 | } | |
1314 | ||
1315 | #if CONFIG_IOSCHED | |
1316 | /* Mark the UPL as the requesting UPL for decompression */ | |
1317 | upl_mark_decmp(pl); | |
1318 | #endif /* CONFIG_IOSCHED */ | |
1319 | ||
1320 | /* map the upl so we can fetch into it */ | |
1321 | kern_return_t kr = ubc_upl_map(pl, (vm_offset_t*)&data); | |
1322 | if ((kr != KERN_SUCCESS) || (data == NULL)) { | |
1323 | err = ENOSPC; | |
1324 | data = NULL; | |
1325 | #if CONFIG_IOSCHED | |
1326 | upl_unmark_decmp(pl); | |
1327 | #endif /* CONFIG_IOSCHED */ | |
1328 | goto out; | |
1329 | } | |
1330 | ||
1331 | uplPos = f_offset; | |
1332 | uplSize = size; | |
1333 | ||
1334 | /* clip the size to the size of the file */ | |
1335 | if ((uint64_t)uplPos + uplSize > cachedSize) { | |
1336 | /* truncate the read to the size of the file */ | |
1337 | uplSize = (user_ssize_t)(cachedSize - uplPos); | |
1338 | } | |
1339 | ||
1340 | /* do the fetch */ | |
1341 | decmpfs_vector vec; | |
1342 | ||
1343 | decompress: | |
1344 | /* the mapped data pointer points to the first page of the page list, so we want to start filling in at an offset of pl_offset */ | |
1345 | vec = (decmpfs_vector) { | |
1346 | .buf = (char*)data + pl_offset, | |
1347 | .size = size, | |
1348 | }; | |
1349 | ||
1350 | uint64_t did_read = 0; | |
1351 | if (decmpfs_fast_get_state(cp) == FILE_IS_CONVERTING) { | |
1352 | ErrorLogWithPath("unexpected pagein during decompress\n"); | |
1353 | /* | |
1354 | * if the file is converting, this must be a recursive call to pagein from underneath a call to decmpfs_decompress_file; | |
1355 | * pretend that it succeeded but don't do anything since we're just going to write over the pages anyway | |
1356 | */ | |
1357 | err = 0; | |
1358 | } else { | |
1359 | if (!verify_block_size || (verify_block_size <= PAGE_SIZE)) { | |
1360 | err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, uplPos, uplSize, 1, &vec, &did_read); | |
1361 | } else { | |
1362 | off_t l_uplPos = uplPos; | |
1363 | off_t l_pl_offset = pl_offset; | |
1364 | user_ssize_t l_uplSize = uplSize; | |
1365 | upl_page_info_t *pl_info = ubc_upl_pageinfo(pl); | |
1366 | ||
1367 | err = 0; | |
1368 | /* | |
1369 | * When the system page size is less than the "verify block size", | |
1370 | * the UPL passed may not consist solely of absent pages. | |
1371 | * We have to detect the "absent" pages and only decompress | |
1372 | * into those absent/invalid page ranges. | |
1373 | * | |
1374 | * Things that will change in each iteration of the loop : | |
1375 | * | |
1376 | * l_pl_offset = where we are inside the UPL [0, caller_upl_created_size) | |
1377 | * l_uplPos = the file offset the l_pl_offset corresponds to. | |
1378 | * l_uplSize = the size of the upl still unprocessed; | |
1379 | * | |
1380 | * In this picture, we have to do the transfer on 2 ranges | |
1381 | * (One 2 page range and one 3 page range) and the loop | |
1382 | * below will skip the first two pages and then identify | |
1383 | * the next two as invalid and fill those in and | |
1384 | * then skip the next one and then do the last pages. | |
1385 | * | |
1386 | * uplPos(file_offset) | |
1387 | * | uplSize | |
1388 | * 0 V<--------------> file_size | |
1389 | * |---------------------------------------------------> | |
1390 | * | | |V|V|I|I|V|I|I|I| | |
1391 | * ^ | |
1392 | * | upl | |
1393 | * <-------------------> | |
1394 | * | | |
1395 | * pl_offset | |
1396 | * | |
1397 | * uplSize will be clipped in case the UPL range exceeds | |
1398 | * the file size. | |
1399 | * | |
1400 | */ | |
1401 | while (l_uplSize) { | |
1402 | uint64_t l_did_read = 0; | |
1403 | int pl_offset_pg = (int)(l_pl_offset / PAGE_SIZE); | |
1404 | int pages_left_in_upl; | |
1405 | int start_pg; | |
1406 | int last_pg; | |
1407 | ||
1408 | /* | |
1409 | * l_uplSize may start off less than the size of the upl, | |
1410 | 	 * so we have to round it up to PAGE_SIZE to calculate | |
1411 | * how many more pages are left. | |
1412 | */ | |
1413 | pages_left_in_upl = (int)(round_page((vm_offset_t)l_uplSize) / PAGE_SIZE); | |
1414 | ||
1415 | /* | |
1416 | * scan from the beginning of the upl looking for the first | |
1417 | * non-valid page.... this will become the first page in | |
1418 | * the request we're going to make to | |
1419 | * 'decmpfs_fetch_uncompressed_data'... if all | |
1420 | * of the pages are valid, we won't call through | |
1421 | * to 'decmpfs_fetch_uncompressed_data' | |
1422 | */ | |
1423 | for (start_pg = 0; start_pg < pages_left_in_upl; start_pg++) { | |
1424 | if (!upl_valid_page(pl_info, pl_offset_pg + start_pg)) { | |
1425 | break; | |
1426 | } | |
1427 | } | |
1428 | ||
1429 | num_valid_pages += start_pg; | |
1430 | ||
1431 | /* | |
1432 | * scan from the starting invalid page looking for | |
1433 | * a valid page before the end of the upl is | |
1434 | * reached, if we find one, then it will be the | |
1435 | * last page of the request to 'decmpfs_fetch_uncompressed_data' | |
1436 | */ | |
1437 | for (last_pg = start_pg; last_pg < pages_left_in_upl; last_pg++) { | |
1438 | if (upl_valid_page(pl_info, pl_offset_pg + last_pg)) { | |
1439 | break; | |
1440 | } | |
1441 | } | |
1442 | ||
1443 | if (start_pg < last_pg) { | |
1444 | off_t inval_offset = start_pg * PAGE_SIZE; | |
1445 | int inval_pages = last_pg - start_pg; | |
1446 | int inval_size = inval_pages * PAGE_SIZE; | |
1447 | decmpfs_vector l_vec; | |
1448 | ||
1449 | num_invalid_pages += inval_pages; | |
1450 | if (inval_offset) { | |
1451 | did_read += inval_offset; | |
1452 | l_pl_offset += inval_offset; | |
1453 | l_uplPos += inval_offset; | |
1454 | l_uplSize -= inval_offset; | |
1455 | } | |
1456 | ||
1457 | l_vec = (decmpfs_vector) { | |
1458 | .buf = (char*)data + l_pl_offset, | |
1459 | .size = inval_size, | |
1460 | }; | |
1461 | ||
1462 | err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, l_uplPos, | |
1463 | MIN(l_uplSize, inval_size), 1, &l_vec, &l_did_read); | |
1464 | ||
1465 | if (!err && (l_did_read != inval_size) && (l_uplSize > inval_size)) { | |
1466 | ErrorLogWithPath("Unexpected size fetch of decompressed data, l_uplSize = %d, l_did_read = %d, inval_size = %d\n", | |
1467 | (int)l_uplSize, (int)l_did_read, (int)inval_size); | |
1468 | err = EINVAL; | |
1469 | } | |
1470 | } else { | |
1471 | /* no invalid pages left */ | |
1472 | l_did_read = l_uplSize; | |
1473 | if (uplSize < size) { | |
1474 | file_tail_page_valid = true; | |
1475 | } | |
1476 | } | |
1477 | ||
1478 | if (err) { | |
1479 | break; | |
1480 | } | |
1481 | ||
1482 | did_read += l_did_read; | |
1483 | l_pl_offset += l_did_read; | |
1484 | l_uplPos += l_did_read; | |
1485 | l_uplSize -= l_did_read; | |
1486 | } | |
1487 | } | |
1488 | } | |
1489 | if (err) { | |
1490 | DebugLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err); | |
1491 | int cmp_state = decmpfs_fast_get_state(cp); | |
1492 | if (cmp_state == FILE_IS_CONVERTING) { | |
1493 | DebugLogWithPath("cmp_state == FILE_IS_CONVERTING\n"); | |
1494 | cmp_state = wait_for_decompress(cp); | |
1495 | if (cmp_state == FILE_IS_COMPRESSED) { | |
1496 | DebugLogWithPath("cmp_state == FILE_IS_COMPRESSED\n"); | |
1497 | /* a decompress was attempted but it failed, let's try calling fetch again */ | |
1498 | goto decompress; | |
1499 | } | |
1500 | } | |
1501 | if (cmp_state == FILE_IS_NOT_COMPRESSED) { | |
1502 | DebugLogWithPath("cmp_state == FILE_IS_NOT_COMPRESSED\n"); | |
1503 | /* the file was decompressed after we started reading it */ | |
1504 | *is_compressed = 0; /* instruct caller to fall back to its normal path */ | |
1505 | } | |
1506 | } | |
1507 | ||
1508 | /* zero out whatever we didn't read, and zero out the end of the last page(s) */ | |
1509 | uint64_t total_size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); | |
1510 | if (did_read < total_size && !(verify_block_size && err)) { | |
1511 | uint64_t rounded_up_did_read = file_tail_page_valid ? (uint64_t)(round_page((vm_offset_t)did_read)) : did_read; | |
1512 | memset((char*)vec.buf + rounded_up_did_read, 0, (size_t)(total_size - rounded_up_did_read)); | |
1513 | } | |
1514 | ||
1515 | if (!err && verify_block_size) { | |
1516 | size_t cur_verify_block_size = verify_block_size; | |
1517 | ||
1518 | if ((err = VNOP_VERIFY(vp, uplPos, vec.buf, size, &cur_verify_block_size, 0, NULL))) { | |
1519 | ErrorLogWithPath("Verification failed with error %d, uplPos = %lld, uplSize = %d, did_read = %d, total_size = %d, valid_pages = %d, invalid_pages = %d, tail_page_valid = %d\n", | |
1520 | err, (long long)uplPos, (int)uplSize, (int)did_read, (int)total_size, num_valid_pages, num_invalid_pages, file_tail_page_valid); | |
1521 | } | |
1522 | /* XXX : If the verify block size changes, redo the read */ | |
1523 | } | |
1524 | ||
1525 | #if CONFIG_IOSCHED | |
1526 | upl_unmark_decmp(pl); | |
1527 | #endif /* CONFIG_IOSCHED */ | |
1528 | ||
1529 | kr = ubc_upl_unmap(pl); data = NULL; /* make sure to set data to NULL so we don't try to unmap again below */ | |
1530 | if (kr != KERN_SUCCESS) { | |
1531 | ErrorLogWithPath("ubc_upl_unmap error %d\n", (int)kr); | |
1532 | } else { | |
1533 | if (!err) { | |
1534 | /* commit our pages */ | |
1535 | kr = commit_upl(pl, pl_offset, (size_t)total_size, UPL_COMMIT_FREE_ON_EMPTY, 0); | |
1536 | } | |
1537 | } | |
1538 | ||
1539 | out: | |
1540 | if (data) { | |
1541 | ubc_upl_unmap(pl); | |
1542 | } | |
1543 | if (hdr != NULL) { | |
1544 | kheap_free(KHEAP_TEMP, hdr, alloc_size); | |
1545 | } | |
1546 | if (cmpdata_locked) { | |
1547 | decmpfs_unlock_compressed_data(cp, 0); | |
1548 | } | |
1549 | if (err) { | |
1550 | #if 0 | |
1551 | if (err != ENXIO && err != ENOSPC) { | |
1552 | char *path = zalloc(ZV_NAMEI); | |
1553 | panic("%s: decmpfs_pagein_compressed: err %d", vnpath(vp, path, PATH_MAX), err); | |
1554 | zfree(ZV_NAMEI, path); | |
1555 | } | |
1556 | #endif /* 0 */ | |
1557 | ErrorLogWithPath("err %d\n", err); | |
1558 | } | |
1559 | return err; | |
1560 | } | |
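/*
 * Illustrative only: a minimal sketch of how a filesystem's vnop_pagein
 * handler might hand a compressed file off to decmpfs_pagein_compressed().
 * The myfs_* names are hypothetical placeholders, not real kernel APIs;
 * real filesystems wire this up through their own cnode bookkeeping.
 */
#if 0 /* example sketch, not compiled */
static int
myfs_vnop_pagein(struct vnop_pagein_args *ap)
{
	vnode_t vp = ap->a_vp;
	decmpfs_cnode *dp = myfs_decmp_cnode(vp); /* hypothetical accessor */

	if (dp != NULL && decmpfs_file_is_compressed(vp, dp)) {
		int is_compressed = 1;
		int error = decmpfs_pagein_compressed(ap, &is_compressed, dp);
		if (is_compressed) {
			/* decmpfs handled (or definitively failed) this pagein */
			return error;
		}
		/* the file was decompressed underneath us; fall back below */
	}
	return myfs_pagein_uncompressed(ap); /* hypothetical normal path */
}
#endif /* 0 */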
1561 | ||
1562 | errno_t | |
1563 | decmpfs_read_compressed(struct vnop_read_args *ap, int *is_compressed, decmpfs_cnode *cp) | |
1564 | { | |
1565 | /* handles a read request from vfs for a compressed file */ | |
1566 | ||
1567 | uio_t uio = ap->a_uio; | |
1568 | vnode_t vp = ap->a_vp; | |
1569 | int err = 0; | |
1570 | int countInt = 0; | |
1571 | off_t uplPos = 0; | |
1572 | user_ssize_t uplSize = 0; | |
1573 | user_ssize_t uplRemaining = 0; | |
1574 | off_t curUplPos = 0; | |
1575 | user_ssize_t curUplSize = 0; | |
1576 | kern_return_t kr = KERN_SUCCESS; | |
1577 | int abort_read = 0; | |
1578 | void *data = NULL; | |
1579 | uint64_t did_read = 0; | |
1580 | upl_t upl = NULL; | |
1581 | upl_page_info_t *pli = NULL; | |
1582 | decmpfs_header *hdr = NULL; | |
1583 | size_t alloc_size = 0; | |
1584 | uint64_t cachedSize = 0; | |
1585 | off_t uioPos = 0; | |
1586 | user_ssize_t uioRemaining = 0; | |
1587 | size_t verify_block_size = 0; | |
1588 | size_t alignment_size = PAGE_SIZE; | |
1589 | int cmpdata_locked = 0; | |
1590 | ||
1591 | decmpfs_lock_compressed_data(cp, 0); cmpdata_locked = 1; | |
1592 | ||
1593 | uplPos = uio_offset(uio); | |
1594 | uplSize = uio_resid(uio); | |
1595 | VerboseLogWithPath("uplPos %lld uplSize %lld\n", uplPos, uplSize); | |
1596 | ||
1597 | cachedSize = decmpfs_cnode_get_vnode_cached_size(cp); | |
1598 | ||
1599 | if ((uint64_t)uplPos + uplSize > cachedSize) { | |
1600 | /* truncate the read to the size of the file */ | |
1601 | uplSize = (user_ssize_t)(cachedSize - uplPos); | |
1602 | } | |
1603 | ||
1604 | /* give the cluster layer a chance to fill in whatever it already has */ | |
1605 | countInt = (uplSize > INT_MAX) ? INT_MAX : (int)uplSize; | |
1606 | err = cluster_copy_ubc_data(vp, uio, &countInt, 0); | |
1607 | if (err != 0) { | |
1608 | goto out; | |
1609 | } | |
1610 | ||
1611 | /* figure out what's left */ | |
1612 | uioPos = uio_offset(uio); | |
1613 | uioRemaining = uio_resid(uio); | |
1614 | if ((uint64_t)uioPos + uioRemaining > cachedSize) { | |
1615 | /* truncate the read to the size of the file */ | |
1616 | uioRemaining = (user_ssize_t)(cachedSize - uioPos); | |
1617 | } | |
1618 | ||
1619 | if (uioRemaining <= 0) { | |
1620 | /* nothing left */ | |
1621 | goto out; | |
1622 | } | |
1623 | ||
1624 | err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0, &alloc_size); | |
1625 | if (err != 0) { | |
1626 | goto out; | |
1627 | } | |
1628 | if (!compression_type_valid(vp, hdr)) { | |
1629 | err = ENOTSUP; | |
1630 | goto out; | |
1631 | } | |
1632 | ||
1633 | uplPos = uioPos; | |
1634 | uplSize = uioRemaining; | |
1635 | #if COMPRESSION_DEBUG | |
1636 | DebugLogWithPath("uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize); | |
1637 | #endif | |
1638 | ||
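/*
 * the registered decompressor may want to adjust the region we are about to
 * fetch.  for example (illustrative, not required of any type), a
 * chunk-based format might widen the range out to whole-chunk boundaries so
 * that it can decode complete chunks; whatever comes back is clipped to the
 * file size below.
 */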
1639 | lck_rw_lock_shared(&decompressorsLock); | |
1640 | decmpfs_adjust_fetch_region_func adjust_fetch = decmp_get_func(vp, hdr->compression_type, adjust_fetch); | |
1641 | if (adjust_fetch) { | |
1642 | /* give the compressor a chance to adjust the portion of the file that we read */ | |
1643 | adjust_fetch(vp, decmpfs_ctx, hdr, &uplPos, &uplSize); | |
1644 | VerboseLogWithPath("adjusted uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize); | |
1645 | } | |
1646 | lck_rw_unlock_shared(&decompressorsLock); | |
1647 | ||
1648 | /* clip the adjusted size to the size of the file */ | |
1649 | if ((uint64_t)uplPos + uplSize > cachedSize) { | |
1650 | /* truncate the read to the size of the file */ | |
1651 | uplSize = (user_ssize_t)(cachedSize - uplPos); | |
1652 | } | |
1653 | ||
1654 | if (uplSize <= 0) { | |
1655 | /* nothing left */ | |
1656 | goto out; | |
1657 | } | |
1658 | ||
1659 | /* | |
1660 | * since we're going to create a upl for the given region of the file, | |
1661 | * make sure we're on page boundaries | |
1662 | */ | |
1663 | ||
1664 | /* If the verify block size is larger than the page size, the UPL needs to be aligned to it */ | |
1665 | err = VNOP_VERIFY(vp, uplPos, NULL, 0, &verify_block_size, VNODE_VERIFY_DEFAULT, NULL); | |
1666 | if (err) { | |
1667 | goto out; | |
1668 | } else if (verify_block_size) { | |
1669 | if (verify_block_size & (verify_block_size - 1)) { | |
1670 | ErrorLogWithPath("verify block size is not power of 2, no verification will be done\n"); | |
1671 | verify_block_size = 0; | |
1672 | } else if (verify_block_size > PAGE_SIZE) { | |
1673 | alignment_size = verify_block_size; | |
1674 | } | |
1675 | } | |
1676 | ||
1677 | if (uplPos & (alignment_size - 1)) { | |
1678 | /* round position down to page boundary */ | |
1679 | uplSize += (uplPos & (alignment_size - 1)); | |
1680 | uplPos &= ~(alignment_size - 1); | |
1681 | } | |
1682 | ||
1683 | /* round size up to an alignment_size multiple */ | |
1684 | uplSize = (uplSize + (alignment_size - 1)) & ~(alignment_size - 1); | |
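/*
 * worked example (assuming alignment_size == PAGE_SIZE == 4096): a request
 * of uplPos = 6000, uplSize = 100 backs up to uplPos = 4096, grows uplSize
 * by the 1904 bytes we backed over (to 2004), and then rounds up to 4096,
 * so the upl covers exactly the page containing the originally requested
 * byte range.
 */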
1685 | ||
1686 | VerboseLogWithPath("new uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize); | |
1687 | ||
1688 | uplRemaining = uplSize; | |
1689 | curUplPos = uplPos; | |
1690 | curUplSize = 0; | |
1691 | ||
1692 | while (uplRemaining > 0) { | |
1693 | /* start after the last upl */ | |
1694 | curUplPos += curUplSize; | |
1695 | ||
1696 | /* clip to max upl size */ | |
1697 | curUplSize = uplRemaining; | |
1698 | if (curUplSize > MAX_UPL_SIZE_BYTES) { | |
1699 | curUplSize = MAX_UPL_SIZE_BYTES; | |
1700 | } | |
1701 | ||
1702 | /* create the upl */ | |
1703 | kr = ubc_create_upl_kernel(vp, curUplPos, (int)curUplSize, &upl, &pli, UPL_SET_LITE, VM_KERN_MEMORY_FILE); | |
1704 | if (kr != KERN_SUCCESS) { | |
1705 | ErrorLogWithPath("ubc_create_upl error %d\n", (int)kr); | |
1706 | err = EINVAL; | |
1707 | goto out; | |
1708 | } | |
1709 | VerboseLogWithPath("curUplPos %lld curUplSize %lld\n", (uint64_t)curUplPos, (uint64_t)curUplSize); | |
1710 | ||
1711 | #if CONFIG_IOSCHED | |
1712 | /* Mark the UPL as the requesting UPL for decompression */ | |
1713 | upl_mark_decmp(upl); | |
1714 | #endif /* CONFIG_IOSCHED */ | |
1715 | ||
1716 | /* map the upl */ | |
1717 | kr = ubc_upl_map(upl, (vm_offset_t*)&data); | |
1718 | if (kr != KERN_SUCCESS) { | |
1719 | commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1); | |
1720 | #if 0 | |
1721 | char *path = zalloc(ZV_NAMEI); | |
1722 | panic("%s: decmpfs_read_compressed: ubc_upl_map error %d", vnpath(vp, path, PATH_MAX), (int)kr); | |
1723 | zfree(ZV_NAMEI, path); | |
1724 | #else /* 0 */ | |
1725 | ErrorLogWithPath("ubc_upl_map kr=0x%x\n", (int)kr); | |
1726 | #endif /* 0 */ | |
1727 | err = EINVAL; | |
1728 | goto out; | |
1729 | } | |
1730 | ||
1731 | /* make sure the map succeeded */ | |
1732 | if (!data) { | |
1733 | commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1); | |
1734 | ||
1735 | ErrorLogWithPath("ubc_upl_map mapped null\n"); | |
1736 | err = EINVAL; | |
1737 | goto out; | |
1738 | } | |
1739 | ||
1740 | /* fetch uncompressed data into the mapped upl */ | |
1741 | decmpfs_vector vec; | |
1742 | decompress: | |
1743 | vec = (decmpfs_vector){ .buf = data, .size = curUplSize }; | |
1744 | err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, curUplPos, curUplSize, 1, &vec, &did_read); | |
1745 | if (err) { | |
1746 | ErrorLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err); | |
1747 | ||
1748 | /* maybe the file is converting to decompressed */ | |
1749 | int cmp_state = decmpfs_fast_get_state(cp); | |
1750 | if (cmp_state == FILE_IS_CONVERTING) { | |
1751 | ErrorLogWithPath("cmp_state == FILE_IS_CONVERTING\n"); | |
1752 | cmp_state = wait_for_decompress(cp); | |
1753 | if (cmp_state == FILE_IS_COMPRESSED) { | |
1754 | ErrorLogWithPath("cmp_state == FILE_IS_COMPRESSED\n"); | |
1755 | /* a decompress was attempted but it failed, let's try fetching again */ | |
1756 | goto decompress; | |
1757 | } | |
1758 | } | |
1759 | if (cmp_state == FILE_IS_NOT_COMPRESSED) { | |
1760 | ErrorLogWithPath("cmp_state == FILE_IS_NOT_COMPRESSED\n"); | |
1761 | /* the file was decompressed after we started reading it */ | |
1762 | abort_read = 1; /* we're not going to commit our data */ | |
1763 | *is_compressed = 0; /* instruct caller to fall back to its normal path */ | |
1764 | } | |
1765 | kr = KERN_FAILURE; | |
1766 | did_read = 0; | |
1767 | } | |
1768 | ||
1769 | /* zero out the remainder of the last page */ | |
1770 | memset((char*)data + did_read, 0, (size_t)(curUplSize - did_read)); | |
1771 | if (!err && verify_block_size) { | |
1772 | size_t cur_verify_block_size = verify_block_size; | |
1773 | ||
1774 | if ((err = VNOP_VERIFY(vp, curUplPos, data, curUplSize, &cur_verify_block_size, 0, NULL))) { | |
1775 | ErrorLogWithPath("Verification failed with error %d\n", err); | |
1776 | abort_read = 1; | |
1777 | } | |
1778 | /* XXX : If the verify block size changes, redo the read */ | |
1779 | } | |
1780 | ||
1781 | kr = ubc_upl_unmap(upl); | |
1782 | if (kr == KERN_SUCCESS) { | |
1783 | if (abort_read) { | |
1784 | kr = commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1); | |
1785 | } else { | |
1786 | VerboseLogWithPath("uioPos %lld uioRemaining %lld\n", (uint64_t)uioPos, (uint64_t)uioRemaining); | |
1787 | if (uioRemaining) { | |
1788 | off_t uplOff = uioPos - curUplPos; | |
1789 | if (uplOff < 0) { | |
1790 | ErrorLogWithPath("uplOff %lld should never be negative\n", (int64_t)uplOff); | |
1791 | err = EINVAL; | |
1792 | } else if (uplOff > INT_MAX) { | |
1793 | ErrorLogWithPath("uplOff %lld too large\n", (int64_t)uplOff); | |
1794 | err = EINVAL; | |
1795 | } else { | |
1796 | off_t count = curUplPos + curUplSize - uioPos; | |
1797 | if (count < 0) { | |
1798 | /* this upl is entirely before the uio */ | |
1799 | } else { | |
1800 | if (count > uioRemaining) { | |
1801 | count = uioRemaining; | |
1802 | } | |
1803 | int icount = (count > INT_MAX) ? INT_MAX : (int)count; | |
1804 | int io_resid = icount; | |
1805 | err = cluster_copy_upl_data(uio, upl, (int)uplOff, &io_resid); | |
1806 | int copied = icount - io_resid; | |
1807 | VerboseLogWithPath("uplOff %lld count %lld copied %lld\n", (uint64_t)uplOff, (uint64_t)count, (uint64_t)copied); | |
1808 | if (err) { | |
1809 | ErrorLogWithPath("cluster_copy_upl_data err %d\n", err); | |
1810 | } | |
1811 | uioPos += copied; | |
1812 | uioRemaining -= copied; | |
1813 | } | |
1814 | } | |
1815 | } | |
1816 | kr = commit_upl(upl, 0, curUplSize, UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_INACTIVATE, 0); | |
1817 | if (err) { | |
1818 | goto out; | |
1819 | } | |
1820 | } | |
1821 | } else { | |
1822 | ErrorLogWithPath("ubc_upl_unmap error %d\n", (int)kr); | |
1823 | } | |
1824 | ||
1825 | uplRemaining -= curUplSize; | |
1826 | } | |
1827 | ||
1828 | out: | |
1829 | ||
1830 | if (hdr != NULL) { | |
1831 | kheap_free(KHEAP_TEMP, hdr, alloc_size); | |
1832 | } | |
1833 | if (cmpdata_locked) { | |
1834 | decmpfs_unlock_compressed_data(cp, 0); | |
1835 | } | |
1836 | if (err) {/* something went wrong */ | |
1837 | ErrorLogWithPath("err %d\n", err); | |
1838 | return err; | |
1839 | } | |
1840 | ||
1841 | #if COMPRESSION_DEBUG | |
1842 | uplSize = uio_resid(uio); | |
1843 | if (uplSize) { | |
1844 | VerboseLogWithPath("still %lld bytes to copy\n", uplSize); | |
1845 | } | |
1846 | #endif | |
1847 | return 0; | |
1848 | } | |
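/*
 * Illustrative only: the read path is typically wired up the same way as the
 * pagein sketch earlier in this file. The myfs_* names are hypothetical
 * placeholders, not real kernel APIs.
 */
#if 0 /* example sketch, not compiled */
static int
myfs_vnop_read(struct vnop_read_args *ap)
{
	vnode_t vp = ap->a_vp;
	decmpfs_cnode *dp = myfs_decmp_cnode(vp); /* hypothetical accessor */

	if (dp != NULL && decmpfs_file_is_compressed(vp, dp)) {
		int is_compressed = 1;
		errno_t error = decmpfs_read_compressed(ap, &is_compressed, dp);
		if (is_compressed) {
			return error;
		}
		/* the file was decompressed underneath us; fall back below */
	}
	return myfs_read_uncompressed(ap); /* hypothetical normal path */
}
#endif /* 0 */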
1849 | ||
1850 | int | |
1851 | decmpfs_free_compressed_data(vnode_t vp, decmpfs_cnode *cp) | |
1852 | { | |
1853 | /* | |
1854 | * call out to the decompressor to free/remove any data associated with this compressed file, | |
1855 | * then delete the file's compression xattr | |
1856 | */ | |
1857 | decmpfs_header *hdr = NULL; | |
1858 | size_t alloc_size = 0; | |
1859 | ||
1860 | /* | |
1861 | * Trace the following parameters on entry with event-id 0x03120010. | |
1862 | * | |
1863 | * @vp->v_id: vnode-id of the file for which to free compressed data. | |
1864 | */ | |
1865 | DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FREE_COMPRESSED_DATA, vp->v_id); | |
1866 | ||
1867 | int err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0, &alloc_size); | |
1868 | if (err) { | |
1869 | ErrorLogWithPath("decmpfs_fetch_compressed_header err %d\n", err); | |
1870 | } else { | |
1871 | lck_rw_lock_shared(&decompressorsLock); | |
1872 | decmpfs_free_compressed_data_func free_data = decmp_get_func(vp, hdr->compression_type, free_data); | |
1873 | if (free_data) { | |
1874 | err = free_data(vp, decmpfs_ctx, hdr); | |
1875 | } else { | |
1876 | /* nothing to do, so no error */ | |
1877 | err = 0; | |
1878 | } | |
1879 | lck_rw_unlock_shared(&decompressorsLock); | |
1880 | ||
1881 | if (err != 0) { | |
1882 | ErrorLogWithPath("decompressor err %d\n", err); | |
1883 | } | |
1884 | } | |
1885 | /* | |
1886 | * Trace the following parameters on return with event-id 0x03120010. | |
1887 | * | |
1888 | * @vp->v_id: vnode-id of the file for which to free compressed data. | |
1889 | * @err: value returned from this function. | |
1890 | */ | |
1891 | DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FREE_COMPRESSED_DATA, vp->v_id, err); | |
1892 | ||
1893 | /* delete the xattr */ | |
1894 | err = vn_removexattr(vp, DECMPFS_XATTR_NAME, 0, decmpfs_ctx); | |
1895 | ||
1896 | if (hdr != NULL) { | |
1897 | kheap_free(KHEAP_TEMP, hdr, alloc_size); | |
1898 | } | |
1899 | return err; | |
1900 | } | |
1901 | ||
1902 | #pragma mark --- file conversion routines --- | |
1903 | ||
1904 | static int | |
1905 | unset_compressed_flag(vnode_t vp) | |
1906 | { | |
1907 | int err = 0; | |
1908 | struct vnode_attr va; | |
1909 | int new_bsdflags = 0; | |
1910 | ||
1911 | VATTR_INIT(&va); | |
1912 | VATTR_WANTED(&va, va_flags); | |
1913 | err = vnode_getattr(vp, &va, decmpfs_ctx); | |
1914 | ||
1915 | if (err != 0) { | |
1916 | ErrorLogWithPath("vnode_getattr err %d\n", err); | |
1917 | } else { | |
1918 | new_bsdflags = va.va_flags & ~UF_COMPRESSED; | |
1919 | ||
1920 | VATTR_INIT(&va); | |
1921 | VATTR_SET(&va, va_flags, new_bsdflags); | |
1922 | err = vnode_setattr(vp, &va, decmpfs_ctx); | |
1923 | if (err != 0) { | |
1924 | ErrorLogWithPath("vnode_setattr err %d\n", err); | |
1925 | } | |
1926 | } | |
1927 | return err; | |
1928 | } | |
1929 | ||
1930 | int | |
1931 | decmpfs_decompress_file(vnode_t vp, decmpfs_cnode *cp, off_t toSize, int truncate_okay, int skiplock) | |
1932 | { | |
1933 | /* convert a compressed file to an uncompressed file */ | |
1934 | ||
1935 | int err = 0; | |
1936 | char *data = NULL; | |
1937 | uio_t uio_w = 0; | |
1938 | off_t offset = 0; | |
1939 | uint32_t old_state = 0; | |
1940 | uint32_t new_state = 0; | |
1941 | int update_file_state = 0; | |
1942 | size_t allocSize = 0; | |
1943 | decmpfs_header *hdr = NULL; | |
1944 | size_t hdr_size = 0; | |
1945 | int cmpdata_locked = 0; | |
1946 | off_t remaining = 0; | |
1947 | uint64_t uncompressed_size = 0; | |
1948 | ||
1949 | /* | |
1950 | * Trace the following parameters on entry with event-id 0x03120000. | |
1951 | * | |
1952 | * @vp->v_id: vnode-id of the file being decompressed. | |
1953 | * @toSize: number of bytes of the file to uncompress. | |
1954 | * @truncate_okay: on error it is OK to truncate. | |
1955 | * @skiplock: compressed data is locked, skip locking again. | |
1956 | * | |
1957 | * Please NOTE: @toSize can overflow in theory but here it is safe. | |
1958 | */ | |
1959 | DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_DECOMPRESS_FILE, vp->v_id, | |
1960 | (int)toSize, truncate_okay, skiplock); | |
1961 | ||
1962 | if (!skiplock) { | |
1963 | decmpfs_lock_compressed_data(cp, 1); cmpdata_locked = 1; | |
1964 | } | |
1965 | ||
1966 | decompress: | |
1967 | old_state = decmpfs_fast_get_state(cp); | |
1968 | ||
1969 | switch (old_state) { | |
1970 | case FILE_IS_NOT_COMPRESSED: | |
1971 | { | |
1972 | /* someone else decompressed the file */ | |
1973 | err = 0; | |
1974 | goto out; | |
1975 | } | |
1976 | ||
1977 | case FILE_TYPE_UNKNOWN: | |
1978 | { | |
1979 | /* the file is in an unknown state, so update the state and retry */ | |
1980 | (void)decmpfs_file_is_compressed(vp, cp); | |
1981 | ||
1982 | /* try again */ | |
1983 | goto decompress; | |
1984 | } | |
1985 | ||
1986 | case FILE_IS_COMPRESSED: | |
1987 | { | |
1988 | /* the file is compressed, so decompress it */ | |
1989 | break; | |
1990 | } | |
1991 | ||
1992 | default: | |
1993 | { | |
1994 | /* | |
1995 | * this shouldn't happen since multiple calls to decmpfs_decompress_file lock each other out, | |
1996 | * and when decmpfs_decompress_file returns, the state should always be set back to | |
1997 | * FILE_IS_NOT_COMPRESSED or FILE_TYPE_UNKNOWN | |
1998 | */ | |
1999 | err = EINVAL; | |
2000 | goto out; | |
2001 | } | |
2002 | } | |
2003 | ||
2004 | err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0, &hdr_size); | |
2005 | if (err != 0) { | |
2006 | goto out; | |
2007 | } | |
2008 | ||
2009 | uncompressed_size = hdr->uncompressed_size; | |
2010 | if (toSize == -1) { | |
2011 | toSize = hdr->uncompressed_size; | |
2012 | } | |
2013 | ||
2014 | if (toSize == 0) { | |
2015 | /* special case truncating the file to zero bytes */ | |
2016 | goto nodecmp; | |
2017 | } else if ((uint64_t)toSize > hdr->uncompressed_size) { | |
2018 | /* the caller is trying to grow the file, so we should decompress all the data */ | |
2019 | toSize = hdr->uncompressed_size; | |
2020 | } | |
2021 | ||
2022 | allocSize = MIN(64 * 1024, (size_t)toSize); | |
2023 | data = kheap_alloc(KHEAP_TEMP, allocSize, Z_WAITOK); | |
2024 | if (!data) { | |
2025 | err = ENOMEM; | |
2026 | goto out; | |
2027 | } | |
2028 | ||
2029 | uio_w = uio_create(1, 0LL, UIO_SYSSPACE, UIO_WRITE); | |
2030 | if (!uio_w) { | |
2031 | err = ENOMEM; | |
2032 | goto out; | |
2033 | } | |
2034 | uio_w->uio_flags |= UIO_FLAGS_IS_COMPRESSED_FILE; | |
2035 | ||
2036 | remaining = toSize; | |
2037 | ||
2038 | /* tell the buffer cache that this is an empty file */ | |
2039 | ubc_setsize(vp, 0); | |
2040 | ||
2041 | /* if we got here, we need to decompress the file */ | |
2042 | decmpfs_cnode_set_vnode_state(cp, FILE_IS_CONVERTING, 1); | |
2043 | ||
2044 | while (remaining > 0) { | |
2045 | /* loop decompressing data from the file and writing it into the data fork */ | |
2046 | ||
2047 | uint64_t bytes_read = 0; | |
2048 | decmpfs_vector vec = { .buf = data, .size = (user_ssize_t)MIN(allocSize, remaining) }; | |
2049 | err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, offset, vec.size, 1, &vec, &bytes_read); | |
2050 | if (err != 0) { | |
2051 | ErrorLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err); | |
2052 | goto out; | |
2053 | } | |
2054 | ||
2055 | if (bytes_read == 0) { | |
2056 | /* we're done reading data */ | |
2057 | break; | |
2058 | } | |
2059 | ||
2060 | uio_reset(uio_w, offset, UIO_SYSSPACE, UIO_WRITE); | |
2061 | err = uio_addiov(uio_w, CAST_USER_ADDR_T(data), (user_size_t)bytes_read); | |
2062 | if (err != 0) { | |
2063 | ErrorLogWithPath("uio_addiov err %d\n", err); | |
2064 | err = ENOMEM; | |
2065 | goto out; | |
2066 | } | |
2067 | ||
2068 | err = VNOP_WRITE(vp, uio_w, 0, decmpfs_ctx); | |
2069 | if (err != 0) { | |
2070 | /* if the write failed, truncate the file to zero bytes */ | |
2071 | ErrorLogWithPath("VNOP_WRITE err %d\n", err); | |
2072 | break; | |
2073 | } | |
2074 | offset += bytes_read; | |
2075 | remaining -= bytes_read; | |
2076 | } | |
2077 | ||
2078 | if (err == 0) { | |
2079 | if (offset != toSize) { | |
2080 | ErrorLogWithPath("file decompressed to %lld instead of %lld\n", offset, toSize); | |
2081 | err = EINVAL; | |
2082 | goto out; | |
2083 | } | |
2084 | } | |
2085 | ||
2086 | if (err == 0) { | |
2087 | /* sync the data and metadata */ | |
2088 | err = VNOP_FSYNC(vp, MNT_WAIT, decmpfs_ctx); | |
2089 | if (err != 0) { | |
2090 | ErrorLogWithPath("VNOP_FSYNC err %d\n", err); | |
2091 | goto out; | |
2092 | } | |
2093 | } | |
2094 | ||
2095 | if (err != 0) { | |
2096 | /* write, setattr, or fsync failed */ | |
2097 | ErrorLogWithPath("aborting decompress, err %d\n", err); | |
2098 | if (truncate_okay) { | |
2099 | /* truncate anything we might have written */ | |
2100 | int error = vnode_setsize(vp, 0, 0, decmpfs_ctx); | |
2101 | ErrorLogWithPath("vnode_setsize err %d\n", error); | |
2102 | } | |
2103 | goto out; | |
2104 | } | |
2105 | ||
2106 | nodecmp: | |
2107 | /* if we're truncating the file to zero bytes, we'll skip ahead to here */ | |
2108 | ||
2109 | /* unset the compressed flag */ | |
2110 | unset_compressed_flag(vp); | |
2111 | ||
2112 | /* free the compressed data associated with this file */ | |
2113 | err = decmpfs_free_compressed_data(vp, cp); | |
2114 | if (err != 0) { | |
2115 | ErrorLogWithPath("decmpfs_free_compressed_data err %d\n", err); | |
2116 | } | |
2117 | ||
2118 | /* | |
2119 | * even if free_compressed_data or vnode_getattr/vnode_setattr failed, return success | |
2120 | * since we succeeded in writing all of the file data to the data fork | |
2121 | */ | |
2122 | err = 0; | |
2123 | ||
2124 | /* if we got this far, the file was successfully decompressed */ | |
2125 | update_file_state = 1; | |
2126 | new_state = FILE_IS_NOT_COMPRESSED; | |
2127 | ||
2128 | #if COMPRESSION_DEBUG | |
2129 | { | |
2130 | uint64_t filesize = 0; | |
2131 | vnsize(vp, &filesize); | |
2132 | DebugLogWithPath("new file size %lld\n", filesize); | |
2133 | } | |
2134 | #endif | |
2135 | ||
2136 | out: | |
2137 | if (hdr != NULL) { | |
2138 | kheap_free(KHEAP_TEMP, hdr, hdr_size); | |
2139 | } | |
2140 | kheap_free(KHEAP_TEMP, data, allocSize); | |
2141 | if (uio_w) { | |
2142 | uio_free(uio_w); | |
2143 | } | |
2144 | ||
2145 | if (err != 0) { | |
2146 | /* if there was a failure, reset compression flags to unknown and clear the buffer cache data */ | |
2147 | update_file_state = 1; | |
2148 | new_state = FILE_TYPE_UNKNOWN; | |
2149 | if (uncompressed_size) { | |
2150 | ubc_setsize(vp, 0); | |
2151 | ubc_setsize(vp, uncompressed_size); | |
2152 | } | |
2153 | } | |
2154 | ||
2155 | if (update_file_state) { | |
2156 | lck_mtx_lock(&decompress_channel_mtx); | |
2157 | decmpfs_cnode_set_vnode_state(cp, new_state, 1); | |
2158 | wakeup((caddr_t)&decompress_channel); /* wake up anyone who might have been waiting for decompression */ | |
2159 | lck_mtx_unlock(&decompress_channel_mtx); | |
2160 | } | |
2161 | ||
2162 | if (cmpdata_locked) { | |
2163 | decmpfs_unlock_compressed_data(cp, 1); | |
2164 | } | |
2165 | /* | |
2166 | * Trace the following parameters on return with event-id 0x03120000. | |
2167 | * | |
2168 | * @vp->v_id: vnode-id of the file being decompressed. | |
2169 | * @err: value returned from this function. | |
2170 | */ | |
2171 | DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_DECOMPRESS_FILE, vp->v_id, err); | |
2172 | return err; | |
2173 | } | |
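/*
 * Illustrative only: a sketch of how a filesystem might materialize a
 * compressed file before letting a write modify it. The helper name
 * myfs_prepare_for_write() is hypothetical; the arguments mirror the
 * decmpfs_decompress_file() parameters above (-1 == decompress the whole
 * file, truncate_okay == 1, skiplock == 0 so the data lock is taken here).
 */
#if 0 /* example sketch, not compiled */
static int
myfs_prepare_for_write(vnode_t vp, decmpfs_cnode *dp)
{
	if (!decmpfs_file_is_compressed(vp, dp)) {
		return 0;
	}
	return decmpfs_decompress_file(vp, dp, -1, 1, 0);
}
#endif /* 0 */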
2174 | ||
2175 | #pragma mark --- Type1 compressor --- | |
2176 | ||
2177 | /* | |
2178 | * The "Type1" compressor stores the data fork directly in the compression xattr | |
2179 | */ | |
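/*
 * in other words, the DECMPFS_XATTR_NAME xattr of a Type1 file is laid out as
 * [decmpfs_disk_header][raw uncompressed file bytes], which is why both the
 * validate and fetch callbacks below insist that
 * hdr->attr_size == sizeof(decmpfs_disk_header) + hdr->uncompressed_size.
 */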
2180 | ||
2181 | static int | |
2182 | decmpfs_validate_compressed_file_Type1(__unused vnode_t vp, __unused vfs_context_t ctx, decmpfs_header *hdr) | |
2183 | { | |
2184 | int err = 0; | |
2185 | ||
2186 | if (hdr->uncompressed_size + sizeof(decmpfs_disk_header) != (uint64_t)hdr->attr_size) { | |
2187 | err = EINVAL; | |
2188 | goto out; | |
2189 | } | |
2190 | out: | |
2191 | return err; | |
2192 | } | |
2193 | ||
2194 | static int | |
2195 | decmpfs_fetch_uncompressed_data_Type1(__unused vnode_t vp, __unused vfs_context_t ctx, decmpfs_header *hdr, off_t offset, user_ssize_t size, int nvec, decmpfs_vector *vec, uint64_t *bytes_read) | |
2196 | { | |
2197 | int err = 0; | |
2198 | int i; | |
2199 | user_ssize_t remaining; | |
2200 | ||
2201 | if (hdr->uncompressed_size + sizeof(decmpfs_disk_header) != (uint64_t)hdr->attr_size) { | |
2202 | err = EINVAL; | |
2203 | goto out; | |
2204 | } | |
2205 | ||
2206 | #if COMPRESSION_DEBUG | |
2207 | static int dummy = 0; // prevent syslog from coalescing printfs | |
2208 | DebugLogWithPath("%d memcpy %lld at %lld\n", dummy++, size, (uint64_t)offset); | |
2209 | #endif | |
2210 | ||
2211 | remaining = size; | |
2212 | for (i = 0; (i < nvec) && (remaining > 0); i++) { | |
2213 | user_ssize_t curCopy = vec[i].size; | |
2214 | if (curCopy > remaining) { | |
2215 | curCopy = remaining; | |
2216 | } | |
2217 | memcpy(vec[i].buf, hdr->attr_bytes + offset, curCopy); | |
2218 | offset += curCopy; | |
2219 | remaining -= curCopy; | |
2220 | } | |
2221 | ||
2222 | if ((bytes_read) && (err == 0)) { | |
2223 | *bytes_read = (size - remaining); | |
2224 | } | |
2225 | ||
2226 | out: | |
2227 | return err; | |
2228 | } | |
2229 | ||
2230 | SECURITY_READ_ONLY_EARLY(static decmpfs_registration) Type1Reg = | |
2231 | { | |
2232 | .decmpfs_registration = DECMPFS_REGISTRATION_VERSION, | |
2233 | .validate = decmpfs_validate_compressed_file_Type1, | |
2234 | .adjust_fetch = NULL,/* no adjust necessary */ | |
2235 | .fetch = decmpfs_fetch_uncompressed_data_Type1, | |
2236 | .free_data = NULL,/* no free necessary */ | |
2237 | .get_flags = NULL/* no flags */ | |
2238 | }; | |
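/*
 * Illustrative only: a sketch of how an additional decompressor could be
 * registered, modeled on Type1Reg above. MYCMP_TYPE, mycmp_validate and
 * mycmp_fetch are hypothetical; a real decompressor must pick a
 * compression_type value that does not collide with the types decmpfs
 * already knows about and must at minimum implement a fetch callback.
 */
#if 0 /* example sketch, not compiled */
#define MYCMP_TYPE 200 /* hypothetical, otherwise-unused compression_type */

static int
mycmp_validate(__unused vnode_t vp, __unused vfs_context_t ctx, decmpfs_header *hdr)
{
	/* a real implementation would sanity-check hdr for its on-disk format */
	return (hdr->compression_type == MYCMP_TYPE) ? 0 : EINVAL;
}

static int
mycmp_fetch(__unused vnode_t vp, __unused vfs_context_t ctx, __unused decmpfs_header *hdr,
    __unused off_t offset, __unused user_ssize_t size, __unused int nvec,
    __unused decmpfs_vector *vec, uint64_t *bytes_read)
{
	/* a real implementation decodes bytes [offset, offset + size) into 'vec' */
	*bytes_read = 0;
	return ENOTSUP;
}

SECURITY_READ_ONLY_EARLY(static decmpfs_registration) MyCmpReg =
{
	.decmpfs_registration = DECMPFS_REGISTRATION_VERSION,
	.validate = mycmp_validate,
	.adjust_fetch = NULL,
	.fetch = mycmp_fetch,
	.free_data = NULL,
	.get_flags = NULL
};

/* called from some init path: register_decmpfs_decompressor(MYCMP_TYPE, &MyCmpReg); */
#endif /* 0 */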
2239 | ||
2240 | #pragma mark --- decmpfs initialization --- | |
2241 | ||
2242 | void | |
2243 | decmpfs_init(void) | |
2244 | { | |
2245 | static int done = 0; | |
2246 | if (done) { | |
2247 | return; | |
2248 | } | |
2249 | ||
2250 | decmpfs_ctx = vfs_context_create(vfs_context_kernel()); | |
2251 | ||
2252 | register_decmpfs_decompressor(CMP_Type1, &Type1Reg); | |
2253 | ||
2254 | done = 1; | |
2255 | } | |
2256 | #endif /* FS_COMPRESSION */ |