/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#if !HFS_COMPRESSION
/* we need these symbols even though compression is turned off */
char register_decmpfs_decompressor;
char unregister_decmpfs_decompressor;
#else /* HFS_COMPRESSION */
#include <sys/kernel.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/xattr.h>
#include <sys/namei.h>
#include <sys/user.h>
#include <sys/mount_internal.h>
#include <sys/ubc.h>
#include <sys/decmpfs.h>
#include <sys/uio_internal.h>
#include <libkern/OSByteOrder.h>

#pragma mark --- debugging ---

#define COMPRESSION_DEBUG 0
#define COMPRESSION_DEBUG_VERBOSE 0
#define MALLOC_DEBUG 0

static const char *
baseName(const char *path)
{
    if (!path)
        return NULL;
    const char *ret = path;
    int i;
    for (i = 0; path[i] != 0; i++) {
        if (path[i] == '/')
            ret = &path[i + 1];
    }
    return ret;
}

#define ErrorLog(x, args...) printf("%s:%d:%s: " x, baseName(__FILE__), __LINE__, __FUNCTION__, ## args)

#if COMPRESSION_DEBUG
#define DebugLog ErrorLog
#else
#define DebugLog(x...) do { } while(0)
#endif

#if COMPRESSION_DEBUG_VERBOSE
#define VerboseLog ErrorLog
#else
#define VerboseLog(x...) do { } while(0)
#endif

#if MALLOC_DEBUG

static SInt32 totalAlloc;

typedef struct {
    uint32_t allocSz;
    uint32_t magic;
    const char *file;
    int line;
} allocated;

static void *
_malloc(uint32_t sz, __unused int type, __unused int flags, const char *file, int line)
{
    uint32_t allocSz = sz + 2 * sizeof(allocated);

    allocated *alloc = NULL;
    MALLOC(alloc, allocated *, allocSz, type, flags);
    if (!alloc) {
        ErrorLog("malloc failed\n");
        return NULL;
    }

    char *ret = (char*)&alloc[1];
    allocated *alloc2 = (allocated*)(ret + sz);

    alloc->allocSz = allocSz;
    alloc->magic = 0xdadadada;
    alloc->file = file;
    alloc->line = line;

    *alloc2 = *alloc;

    int s = OSAddAtomic(sz, &totalAlloc);
    ErrorLog("malloc(%d) -> %p, total allocations %d\n", sz, ret, s + sz);

    return ret;
}

static void
_free(char *ret, __unused int type, const char *file, int line)
{
    if (!ret) {
        ErrorLog("freeing null\n");
        return;
    }
    allocated *alloc = (allocated*)ret;
    alloc--;
    uint32_t sz = alloc->allocSz - 2 * sizeof(allocated);
    allocated *alloc2 = (allocated*)(ret + sz);

    if (alloc->magic != 0xdadadada) {
        panic("freeing bad pointer");
    }

    if (memcmp(alloc, alloc2, sizeof(*alloc)) != 0) {
        panic("clobbered data");
    }

    memset(ret, 0xce, sz);
    alloc2->file = file;
    alloc2->line = line;
    FREE(alloc, type);
    int s = OSAddAtomic(-sz, &totalAlloc);
    ErrorLog("free(%p,%d) -> total allocations %d\n", ret, sz, s - sz);
}

#undef MALLOC
#undef FREE
#define MALLOC(space, cast, size, type, flags) (space) = (cast)_malloc(size, type, flags, __FILE__, __LINE__)
#define FREE(addr, type) _free((void *)addr, type, __FILE__, __LINE__)

#endif /* MALLOC_DEBUG */

#pragma mark --- globals ---

static lck_grp_t *decmpfs_lockgrp;

static decmpfs_registration * decompressors[CMP_MAX]; /* the registered compressors */
static lck_rw_t * decompressorsLock;
static int decompress_channel; /* channel used by decompress_file to wake up waiters */
static lck_mtx_t *decompress_channel_mtx;

vfs_context_t decmpfs_ctx;

#pragma mark --- decmp_get_func ---

#define offsetof_func(func) ((uintptr_t)(&(((decmpfs_registration*)NULL)->func)))

static void *
_func_from_offset(uint32_t type, int offset)
{
    /* get the function at the given offset in the registration for the given type */
    decmpfs_registration *reg = decompressors[type];
    char *regChar = (char*)reg;
    char *func = &regChar[offset];
    void **funcPtr = (void**)func;
    return funcPtr[0];
}

static void *
_decmp_get_func(uint32_t type, int offset)
{
    /*
     this function should be called while holding a shared lock to decompressorsLock,
     and will return with the lock held
     */

    if (type >= CMP_MAX)
        return NULL;

    if (decompressors[type] != NULL) {
        // the compressor has already registered but the function might be null
        return _func_from_offset(type, offset);
    }

    // the compressor hasn't registered, so it never will unless someone manually kextloads it
    ErrorLog("tried to access a compressed file of unregistered type %d\n", type);
    return NULL;
}

#define decmp_get_func(type, func) _decmp_get_func(type, offsetof_func(func))
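
/*
 * decmp_get_func(type, member) resolves the `member` function pointer of
 * decompressors[type] by its byte offset within decmpfs_registration, e.g.
 *
 *     decmpfs_fetch_uncompressed_data_func fetch =
 *         decmp_get_func(hdr->compression_type, fetch);
 *
 * The caller must already hold decompressorsLock (shared is enough) and keep
 * holding it across any call made through the returned pointer, since
 * unregister_decmpfs_decompressor() takes the lock exclusively and NULLs the
 * slot.
 */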

#pragma mark --- utilities ---

#if COMPRESSION_DEBUG
static char*
vnpath(vnode_t vp, char *path, int len)
{
    int origlen = len;
    path[0] = 0;
    vn_getpath(vp, path, &len);
    path[origlen - 1] = 0;
    return path;
}

static int
vnsize(vnode_t vp, uint64_t *size)
{
    struct vnode_attr va;
    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_data_size);
    int error = vnode_getattr(vp, &va, decmpfs_ctx);
    if (error != 0) {
        ErrorLog("vnode_getattr err %d\n", error);
        return error;
    }
    *size = va.va_data_size;
    return 0;
}
#endif /* COMPRESSION_DEBUG */

#pragma mark --- cnode routines ---

void
decmpfs_cnode_init(decmpfs_cnode *cp)
{
    memset(cp, 0, sizeof(*cp));
    lck_rw_init(&cp->compressed_data_lock, decmpfs_lockgrp, NULL);
#if !DECMPFS_SUPPORTS_SWAP64
    lck_mtx_init(&cp->uncompressed_size_mtx, decmpfs_lockgrp, NULL);
#endif
}

void
decmpfs_cnode_destroy(decmpfs_cnode *cp)
{
    lck_rw_destroy(&cp->compressed_data_lock, decmpfs_lockgrp);
#if !DECMPFS_SUPPORTS_SWAP64
    lck_mtx_destroy(&cp->uncompressed_size_mtx, decmpfs_lockgrp);
#endif
}

boolean_t
decmpfs_trylock_compressed_data(decmpfs_cnode *cp, int exclusive)
{
    void *thread = current_thread();
    boolean_t retval = FALSE;

    if (cp->lockowner == thread) {
        /* this thread is already holding an exclusive lock, so bump the count */
        cp->lockcount++;
        retval = TRUE;
    } else if (exclusive) {
        if ((retval = lck_rw_try_lock_exclusive(&cp->compressed_data_lock))) {
            cp->lockowner = thread;
            cp->lockcount = 1;
        }
    } else {
        if ((retval = lck_rw_try_lock_shared(&cp->compressed_data_lock))) {
            cp->lockowner = (void *)-1;
        }
    }
    return retval;
}

void
decmpfs_lock_compressed_data(decmpfs_cnode *cp, int exclusive)
{
    void *thread = current_thread();

    if (cp->lockowner == thread) {
        /* this thread is already holding an exclusive lock, so bump the count */
        cp->lockcount++;
    } else if (exclusive) {
        lck_rw_lock_exclusive(&cp->compressed_data_lock);
        cp->lockowner = thread;
        cp->lockcount = 1;
    } else {
        lck_rw_lock_shared(&cp->compressed_data_lock);
        cp->lockowner = (void *)-1;
    }
}

void
decmpfs_unlock_compressed_data(decmpfs_cnode *cp, __unused int exclusive)
{
    void *thread = current_thread();

    if (cp->lockowner == thread) {
        /* this thread is holding an exclusive lock, so decrement the count */
        if ((--cp->lockcount) > 0) {
            /* the caller still has outstanding locks, so we're done */
            return;
        }
        cp->lockowner = NULL;
    }

    lck_rw_done(&cp->compressed_data_lock);
}
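
/*
 * The compressed_data_lock is a reader/writer lock with a thin recursion
 * layer on top: an exclusive holder records itself in cp->lockowner and bumps
 * cp->lockcount on re-entry, so the same thread can take the lock again
 * without deadlocking.  Shared holders set lockowner to (void *)-1, a value
 * that never matches a real thread pointer, so the recursion check above only
 * ever fires for the exclusive owner.
 */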

uint32_t
decmpfs_cnode_get_vnode_state(decmpfs_cnode *cp)
{
    return cp->cmp_state;
}

void
decmpfs_cnode_set_vnode_state(decmpfs_cnode *cp, uint32_t state, int skiplock)
{
    if (!skiplock) decmpfs_lock_compressed_data(cp, 1);
    cp->cmp_state = state;
    if (state == FILE_TYPE_UNKNOWN) {
        /* clear out the compression type too */
        cp->cmp_type = 0;
    }
    if (!skiplock) decmpfs_unlock_compressed_data(cp, 1);
}

static void
decmpfs_cnode_set_vnode_cmp_type(decmpfs_cnode *cp, uint32_t cmp_type, int skiplock)
{
    if (!skiplock) decmpfs_lock_compressed_data(cp, 1);
    cp->cmp_type = cmp_type;
    if (!skiplock) decmpfs_unlock_compressed_data(cp, 1);
}

static void
decmpfs_cnode_set_vnode_minimal_xattr(decmpfs_cnode *cp, int minimal_xattr, int skiplock)
{
    if (!skiplock) decmpfs_lock_compressed_data(cp, 1);
    cp->cmp_minimal_xattr = minimal_xattr;
    if (!skiplock) decmpfs_unlock_compressed_data(cp, 1);
}

uint64_t
decmpfs_cnode_get_vnode_cached_size(decmpfs_cnode *cp)
{
#if DECMPFS_SUPPORTS_SWAP64
    return cp->uncompressed_size;
#else
    /*
     since this is a 64-bit field, we may not be able to access it atomically
     so lock access
     */

    lck_mtx_lock(&(cp->uncompressed_size_mtx));
    uint64_t ret = cp->uncompressed_size;
    lck_mtx_unlock(&(cp->uncompressed_size_mtx));
    return ret;
#endif
}

static void
decmpfs_cnode_set_vnode_cached_size(decmpfs_cnode *cp, uint64_t size)
{
#if DECMPFS_SUPPORTS_SWAP64
    while(1) {
        uint64_t old = cp->uncompressed_size;
        if (OSCompareAndSwap64(old, size, (UInt64*)&cp->uncompressed_size)) {
            return;
        } else {
            /* failed to write our value, so loop */
        }
    }
#else
    /*
     since this is a 64-bit field, we may not be able to access it atomically
     so lock access
     */

    lck_mtx_lock(&(cp->uncompressed_size_mtx));
    cp->uncompressed_size = size;
    lck_mtx_unlock(&(cp->uncompressed_size_mtx));
#endif
}

#pragma mark --- decmpfs state routines ---

static int
decmpfs_fetch_compressed_header(vnode_t vp, decmpfs_cnode *cp, decmpfs_header **hdrOut, int returnInvalid)
{
    /*
     fetches vp's compression xattr, converting it into a decmpfs_header; returns 0 or errno
     if returnInvalid == 1, returns the header even if the type was invalid (out of range),
     and returns ERANGE in that case
     */

    size_t read_size = 0;
    size_t attr_size = 0;
    uio_t attr_uio = NULL;
    int err = 0;
    char *data = NULL;
    decmpfs_header *hdr = NULL;
    char uio_buf[ UIO_SIZEOF(1) ];

    if ((cp != NULL) &&
        (cp->cmp_type != 0) &&
        (cp->cmp_minimal_xattr != 0)) {
        /* this file's xattr didn't have any extra data when we fetched it, so we can synthesize a header from the data in the cnode */

        MALLOC(data, char *, sizeof(decmpfs_header), M_TEMP, M_WAITOK);
        if (!data) {
            err = ENOMEM;
            goto out;
        }
        hdr = (decmpfs_header*)data;
        hdr->attr_size = sizeof(decmpfs_disk_header);
        hdr->compression_magic = DECMPFS_MAGIC;
        hdr->compression_type = cp->cmp_type;
        hdr->uncompressed_size = decmpfs_cnode_get_vnode_cached_size(cp);
    } else {
        /* figure out how big the xattr is on disk */
        err = vn_getxattr(vp, DECMPFS_XATTR_NAME, NULL, &attr_size, XATTR_NOSECURITY, decmpfs_ctx);
        if (err != 0)
            goto out;

        if (attr_size < sizeof(decmpfs_disk_header) || attr_size > MAX_DECMPFS_XATTR_SIZE) {
            err = EINVAL;
            goto out;
        }

        /* allocation includes space for the extra attr_size field of a compressed_header */
        MALLOC(data, char *, attr_size + sizeof(hdr->attr_size), M_TEMP, M_WAITOK);
        if (!data) {
            err = ENOMEM;
            goto out;
        }

        /* read the xattr into our buffer, skipping over the attr_size field at the beginning */
        attr_uio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
        uio_addiov(attr_uio, CAST_USER_ADDR_T(data + sizeof(hdr->attr_size)), attr_size);

        err = vn_getxattr(vp, DECMPFS_XATTR_NAME, attr_uio, &read_size, XATTR_NOSECURITY, decmpfs_ctx);
        if (err != 0)
            goto out;
        if (read_size != attr_size) {
            err = EINVAL;
            goto out;
        }
        hdr = (decmpfs_header*)data;
        hdr->attr_size = attr_size;
        /* swap the fields to native endian */
        hdr->compression_magic = OSSwapLittleToHostInt32(hdr->compression_magic);
        hdr->compression_type = OSSwapLittleToHostInt32(hdr->compression_type);
        hdr->uncompressed_size = OSSwapLittleToHostInt64(hdr->uncompressed_size);
    }

    if (hdr->compression_magic != DECMPFS_MAGIC) {
        ErrorLog("invalid compression_magic 0x%08x, should be 0x%08x\n", hdr->compression_magic, DECMPFS_MAGIC);
        err = EINVAL;
        goto out;
    }

    if (hdr->compression_type >= CMP_MAX) {
        if (returnInvalid) {
            /* return the header even though the type is out of range */
            err = ERANGE;
        } else {
            ErrorLog("compression_type %d out of range\n", hdr->compression_type);
            err = EINVAL;
        }
        goto out;
    }

out:
    if (err && (err != ERANGE)) {
        DebugLog("err %d\n", err);
        if (data) FREE(data, M_TEMP);
        *hdrOut = NULL;
    } else {
        *hdrOut = hdr;
    }
    return err;
}
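
/*
 * Layout recovered by decmpfs_fetch_compressed_header(): the com.apple.decmpfs
 * xattr begins with a decmpfs_disk_header whose compression_magic,
 * compression_type and uncompressed_size fields are stored little-endian,
 * optionally followed by compressor-specific payload bytes.  The in-memory
 * decmpfs_header simply prepends attr_size (the total xattr length) to that,
 * which is why the uio above reads the xattr at an offset of
 * sizeof(hdr->attr_size) into the allocation.
 */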

static int
decmpfs_fast_get_state(decmpfs_cnode *cp)
{
    /*
     return the cached state
     this should *only* be called when we know that decmpfs_file_is_compressed has already been called,
     because this implies that the cached state is valid
     */
    int cmp_state = decmpfs_cnode_get_vnode_state(cp);

    switch(cmp_state) {
        case FILE_IS_NOT_COMPRESSED:
        case FILE_IS_COMPRESSED:
        case FILE_IS_CONVERTING:
            return cmp_state;
        case FILE_TYPE_UNKNOWN:
            /*
             we should only get here if decmpfs_file_is_compressed was not called earlier on this vnode,
             which should not be possible
             */
            ErrorLog("decmpfs_fast_get_state called on unknown file\n");
            return FILE_IS_NOT_COMPRESSED;
        default:
            /* */
            ErrorLog("unknown cmp_state %d\n", cmp_state);
            return FILE_IS_NOT_COMPRESSED;
    }
}

static int
decmpfs_fast_file_is_compressed(decmpfs_cnode *cp)
{
    int cmp_state = decmpfs_cnode_get_vnode_state(cp);

    switch(cmp_state) {
        case FILE_IS_NOT_COMPRESSED:
            return 0;
        case FILE_IS_COMPRESSED:
        case FILE_IS_CONVERTING:
            return 1;
        case FILE_TYPE_UNKNOWN:
            /*
             we should only get here if decmpfs_file_is_compressed was not called earlier on this vnode,
             which should not be possible
             */
            ErrorLog("decmpfs_fast_file_is_compressed called on unknown file\n");
            return 0;
        default:
            /* */
            ErrorLog("unknown cmp_state %d\n", cmp_state);
            return 0;
    }
}

errno_t
decmpfs_validate_compressed_file(vnode_t vp, decmpfs_cnode *cp)
{
    /* give a compressor a chance to indicate that a compressed file is invalid */

    decmpfs_header *hdr = NULL;
    errno_t err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0);
    if (err) {
        /* we couldn't get the header */
        if (decmpfs_fast_get_state(cp) == FILE_IS_NOT_COMPRESSED) {
            /* the file is no longer compressed, so return success */
            err = 0;
        }
        goto out;
    }

    lck_rw_lock_shared(decompressorsLock);
    decmpfs_validate_compressed_file_func validate = decmp_get_func(hdr->compression_type, validate);
    if (validate) { /* make sure this validation function is valid */
        /* is the data okay? */
        err = validate(vp, decmpfs_ctx, hdr);
    } else if (decmp_get_func(hdr->compression_type, fetch) == NULL) {
        /* the type isn't registered */
        err = EIO;
    } else {
        /* no validate registered, so nothing to do */
        err = 0;
    }
    lck_rw_done(decompressorsLock);
out:
    if (hdr) FREE(hdr, M_TEMP);
#if COMPRESSION_DEBUG
    if (err) {
        DebugLog("decmpfs_validate_compressed_file ret %d, vp->v_flag %d\n", err, vp->v_flag);
    }
#endif
    return err;
}

int
decmpfs_file_is_compressed(vnode_t vp, decmpfs_cnode *cp)
{
    /*
     determines whether vp points to a compressed file

     to speed up this operation, we cache the result in the cnode, and do as little as possible
     in the case where the cnode already has a valid cached state

     */

    int ret = 0;
    int error = 0;
    uint32_t cmp_state;
    struct vnode_attr va_fetch;
    decmpfs_header *hdr = NULL;
    mount_t mp = NULL;
    int cnode_locked = 0;
    int saveInvalid = 0; // save the header data even though the type was out of range

    if (vnode_isnamedstream(vp)) {
        /*
         named streams can't be compressed
         since named streams of the same file share the same cnode,
         we don't want to get/set the state in the cnode, just return 0
         */
        return 0;
    }

    /* examine the cached state in this cnode */
    cmp_state = decmpfs_cnode_get_vnode_state(cp);
    switch(cmp_state) {
        case FILE_IS_NOT_COMPRESSED:
            return 0;
        case FILE_IS_COMPRESSED:
            return 1;
        case FILE_IS_CONVERTING:
            /* treat the file as compressed, because this gives us a way to block future reads until decompression is done */
            return 1;
        case FILE_TYPE_UNKNOWN:
            /* the first time we encountered this vnode, so we need to check it out */
            break;
        default:
            /* unknown state, assume file is not compressed */
            ErrorLog("unknown cmp_state %d\n", cmp_state);
            return 0;
    }

    if (!vnode_isreg(vp)) {
        /* only regular files can be compressed */
        ret = FILE_IS_NOT_COMPRESSED;
        goto done;
    }

    mp = vnode_mount(vp);
    if (mp == NULL) {
        /*
         this should only be true before we mount the root filesystem
         we short-cut this return to avoid the call to getattr below, which
         will fail before root is mounted
         */
        ret = FILE_IS_NOT_COMPRESSED;
        goto done;
    }
    if ((mp->mnt_flag & MNT_LOCAL) == 0) {
        /* compression only supported on local filesystems */
        ret = FILE_IS_NOT_COMPRESSED;
        goto done;
    }

    /* lock our cnode data so that another caller doesn't change the state under us */
    decmpfs_lock_compressed_data(cp, 1);
    cnode_locked = 1;

    VATTR_INIT(&va_fetch);
    VATTR_WANTED(&va_fetch, va_flags);
    error = vnode_getattr(vp, &va_fetch, decmpfs_ctx);
    if (error) {
        /* failed to get the bsd flags so the file is not compressed */
        ret = FILE_IS_NOT_COMPRESSED;
        goto done;
    }
    if (va_fetch.va_flags & UF_COMPRESSED) {
        /* UF_COMPRESSED is on, make sure the file has the DECMPFS_XATTR_NAME xattr */
        error = decmpfs_fetch_compressed_header(vp, cp, &hdr, 1);
        if ((hdr != NULL) && (error == ERANGE)) {
            saveInvalid = 1;
        }
        if (error) {
            /* failed to get the xattr so the file is not compressed */
            ret = FILE_IS_NOT_COMPRESSED;
            goto done;
        }
        /* we got the xattr, so the file is compressed */
        ret = FILE_IS_COMPRESSED;
        goto done;
    }
    /* UF_COMPRESSED isn't on, so the file isn't compressed */
    ret = FILE_IS_NOT_COMPRESSED;

done:
    if (((ret == FILE_IS_COMPRESSED) || saveInvalid) && hdr) {
        /*
         cache the uncompressed size away in the cnode
         */

        if (!cnode_locked) {
            /*
             we should never get here since the only place ret is set to FILE_IS_COMPRESSED
             is after the call to decmpfs_lock_compressed_data above
             */
            decmpfs_lock_compressed_data(cp, 1);
            cnode_locked = 1;
        }

        decmpfs_cnode_set_vnode_cached_size(cp, hdr->uncompressed_size);
        decmpfs_cnode_set_vnode_state(cp, ret, 1);
        decmpfs_cnode_set_vnode_cmp_type(cp, hdr->compression_type, 1);
        /* remember if the xattr's size was equal to the minimal xattr */
        if (hdr->attr_size == sizeof(decmpfs_disk_header)) {
            decmpfs_cnode_set_vnode_minimal_xattr(cp, 1, 1);
        }
        if (ret == FILE_IS_COMPRESSED) {
            /* update the ubc's size for this file */
            ubc_setsize(vp, hdr->uncompressed_size);
        }
    } else {
        /* we might have already taken the lock above; if so, skip taking it again by passing cnode_locked as the skiplock parameter */
        decmpfs_cnode_set_vnode_state(cp, ret, cnode_locked);
    }

    if (cnode_locked) decmpfs_unlock_compressed_data(cp, 1);

    if (hdr) FREE(hdr, M_TEMP);

    switch(ret) {
        case FILE_IS_NOT_COMPRESSED:
            return 0;
        case FILE_IS_COMPRESSED:
        case FILE_IS_CONVERTING:
            return 1;
        default:
            /* unknown state, assume file is not compressed */
            ErrorLog("unknown ret %d\n", ret);
            return 0;
    }
}
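
/*
 * In short: a vnode is treated as compressed only when it is a regular file
 * on a local filesystem, its BSD flags carry UF_COMPRESSED, and the
 * com.apple.decmpfs xattr can be read and parsed.  The verdict, the
 * compression type and the uncompressed size are all cached in the decmpfs
 * cnode, and the UBC size is switched to the uncompressed size, so later
 * calls (and the fast-path helpers above) do not have to touch the xattr
 * again.
 */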

int
decmpfs_update_attributes(vnode_t vp, struct vnode_attr *vap)
{
    int error = 0;

    if (VATTR_IS_ACTIVE(vap, va_flags)) {
        /* the BSD flags are being updated */
        if (vap->va_flags & UF_COMPRESSED) {
            /* the compressed bit is being set, did it change? */
            struct vnode_attr va_fetch;
            int old_flags = 0;
            VATTR_INIT(&va_fetch);
            VATTR_WANTED(&va_fetch, va_flags);
            error = vnode_getattr(vp, &va_fetch, decmpfs_ctx);
            if (error)
                return error;

            old_flags = va_fetch.va_flags;

            if (!(old_flags & UF_COMPRESSED)) {
                /*
                 * Compression bit was turned on, make sure the file has the DECMPFS_XATTR_NAME attribute.
                 * This precludes anyone from using the UF_COMPRESSED bit for anything else, and it enforces
                 * an order of operation -- you must first do the setxattr and then the chflags.
                 */

                if (VATTR_IS_ACTIVE(vap, va_data_size)) {
                    /*
                     * don't allow the caller to set the BSD flag and the size in the same call
                     * since this doesn't really make sense
                     */
                    vap->va_flags &= ~UF_COMPRESSED;
                    return 0;
                }

                decmpfs_header *hdr = NULL;
                error = decmpfs_fetch_compressed_header(vp, NULL, &hdr, 1);
                if (error == 0) {
                    /*
                     allow the flag to be set since the decmpfs attribute is present
                     in that case, we also want to truncate the data fork of the file
                     */
                    VATTR_SET_ACTIVE(vap, va_data_size);
                    vap->va_data_size = 0;
                } else if (error == ERANGE) {
                    /* the file had a decmpfs attribute but the type was out of range, so don't muck with the file's data size */
                } else {
                    /* no DECMPFS_XATTR_NAME attribute, so deny the update */
                    vap->va_flags &= ~UF_COMPRESSED;
                }
                if (hdr) FREE(hdr, M_TEMP);
            }
        }
    }

    return 0;
}
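
/*
 * A sketch of the ordering this enforces, as seen from a hypothetical
 * user-space compression tool (the calls below are illustrative only; the
 * exact permission checks on writing this xattr from user space are outside
 * this file):
 *
 *     // 1. write the com.apple.decmpfs xattr containing the header + data
 *     setxattr(path, "com.apple.decmpfs", xattr_buf, xattr_len, 0, 0);
 *     // 2. only then set UF_COMPRESSED; decmpfs_update_attributes() verifies
 *     //    the xattr exists and truncates the data fork to zero
 *     chflags(path, st.st_flags | UF_COMPRESSED);
 *
 * If the chflags arrives without the xattr in place, UF_COMPRESSED is quietly
 * stripped from the request rather than failing the call.
 */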

static int
wait_for_decompress(decmpfs_cnode *cp)
{
    int state;
    lck_mtx_lock(decompress_channel_mtx);
    do {
        state = decmpfs_fast_get_state(cp);
        if (state != FILE_IS_CONVERTING) {
            /* file is not decompressing */
            lck_mtx_unlock(decompress_channel_mtx);
            return state;
        }
        msleep((caddr_t)&decompress_channel, decompress_channel_mtx, PINOD, "wait_for_decompress", NULL);
    } while(1);
}

#pragma mark --- decmpfs hide query routines ---

int
decmpfs_hides_rsrc(vfs_context_t ctx, decmpfs_cnode *cp)
{
    /*
     WARNING!!!
     callers may (and do) pass NULL for ctx, so we should only use it
     for this equality comparison

     This routine should only be called after a file has already been through decmpfs_file_is_compressed
     */

    if (ctx == decmpfs_ctx)
        return 0;

    if (!decmpfs_fast_file_is_compressed(cp))
        return 0;

    /* all compressed files hide their resource fork */
    return 1;
}

int
decmpfs_hides_xattr(vfs_context_t ctx, decmpfs_cnode *cp, const char *xattr)
{
    /*
     WARNING!!!
     callers may (and do) pass NULL for ctx, so we should only use it
     for this equality comparison

     This routine should only be called after a file has already been through decmpfs_file_is_compressed
     */

    if (ctx == decmpfs_ctx)
        return 0;
    if (strncmp(xattr, XATTR_RESOURCEFORK_NAME, 22) == 0)
        return decmpfs_hides_rsrc(ctx, cp);
    if (!decmpfs_fast_file_is_compressed(cp))
        /* file is not compressed, so don't hide this xattr */
        return 0;
    if (strncmp(xattr, DECMPFS_XATTR_NAME, 11) == 0)
        /* it's our xattr, so hide it */
        return 1;
    /* don't hide this xattr */
    return 0;
}
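
/*
 * The ctx == decmpfs_ctx check above is what lets decmpfs see through its own
 * hiding: xattr accesses issued from this file use decmpfs_ctx, and registered
 * compressors are handed the same context, so only outside callers have the
 * decmpfs xattr and the resource fork of compressed files filtered out of
 * their view.
 */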

#pragma mark --- registration/validation routines ---

errno_t
register_decmpfs_decompressor(uint32_t compression_type, decmpfs_registration *registration)
{
    /* called by kexts to register decompressors */

    errno_t ret = 0;
    int locked = 0;

    if ((compression_type >= CMP_MAX) ||
        (!registration) ||
        (registration->decmpfs_registration != DECMPFS_REGISTRATION_VERSION)) {
        ret = EINVAL;
        goto out;
    }

    lck_rw_lock_exclusive(decompressorsLock); locked = 1;

    /* make sure the registration for this type is zero */
    if (decompressors[compression_type] != NULL) {
        ret = EEXIST;
        goto out;
    }
    decompressors[compression_type] = registration;
    wakeup((caddr_t)&decompressors);

out:
    if (locked) lck_rw_done(decompressorsLock);
    return ret;
}

errno_t
unregister_decmpfs_decompressor(uint32_t compression_type, decmpfs_registration *registration)
{
    /* called by kexts to unregister decompressors */

    errno_t ret = 0;
    int locked = 0;

    if ((compression_type >= CMP_MAX) ||
        (!registration) ||
        (registration->decmpfs_registration != DECMPFS_REGISTRATION_VERSION)) {
        ret = EINVAL;
        goto out;
    }

    lck_rw_lock_exclusive(decompressorsLock); locked = 1;
    if (decompressors[compression_type] != registration) {
        ret = EEXIST;
        goto out;
    }
    decompressors[compression_type] = NULL;
    wakeup((caddr_t)&decompressors);

out:
    if (locked) lck_rw_done(decompressorsLock);
    return ret;
}
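
/*
 * Registration is a plain table insert keyed by compression type: a kext
 * fills in a decmpfs_registration (version plus validate, adjust_fetch,
 * fetch and free_data callbacks) and hands it to
 * register_decmpfs_decompressor().  The in-kernel Type1Reg registration near
 * the end of this file is the reference example of both the structure layout
 * and the call.
 */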

static int
compression_type_valid(decmpfs_header *hdr)
{
    /* fast pre-check to determine if the given compressor has checked in */
    int ret = 0;

    /* every compressor must have at least a fetch function */
    lck_rw_lock_shared(decompressorsLock);
    if (decmp_get_func(hdr->compression_type, fetch) != NULL) {
        ret = 1;
    }
    lck_rw_done(decompressorsLock);

    return ret;
}

#pragma mark --- compression/decompression routines ---

static int
decmpfs_fetch_uncompressed_data(vnode_t vp, decmpfs_header *hdr, off_t offset, user_ssize_t size, int nvec, decmpfs_vector *vec, uint64_t *bytes_read)
{
    /* get the uncompressed bytes for the specified region of vp by calling out to the registered compressor */

    int err = 0;

    *bytes_read = 0;

    if ((uint64_t)offset >= hdr->uncompressed_size) {
        /* reading past end of file; nothing to do */
        err = 0;
        goto out;
    }
    if (offset < 0) {
        /* tried to read from before start of file */
        err = EINVAL;
        goto out;
    }
    if ((uint64_t)(offset + size) > hdr->uncompressed_size) {
        /* adjust size so we don't read past the end of the file */
        size = hdr->uncompressed_size - offset;
    }
    if (size == 0) {
        /* nothing to read */
        err = 0;
        goto out;
    }

    lck_rw_lock_shared(decompressorsLock);
    decmpfs_fetch_uncompressed_data_func fetch = decmp_get_func(hdr->compression_type, fetch);
    if (fetch) {
        err = fetch(vp, decmpfs_ctx, hdr, offset, size, nvec, vec, bytes_read);
    } else {
        err = ENOTSUP;
    }
    lck_rw_done(decompressorsLock);

out:
    return err;
}

static kern_return_t
commit_upl(upl_t upl, upl_offset_t pl_offset, size_t uplSize, int flags, int abort)
{
    kern_return_t kr = 0;

    /* commit the upl pages */
    if (abort) {
        VerboseLog("aborting upl, flags 0x%08x\n", flags);
        kr = ubc_upl_abort_range(upl, pl_offset, uplSize, flags);
        if (kr != KERN_SUCCESS)
            ErrorLog("ubc_upl_abort_range error %d\n", (int)kr);
    } else {
        VerboseLog("committing upl, flags 0x%08x\n", flags | UPL_COMMIT_CLEAR_DIRTY);
        kr = ubc_upl_commit_range(upl, pl_offset, uplSize, flags | UPL_COMMIT_CLEAR_DIRTY);
        if (kr != KERN_SUCCESS)
            ErrorLog("ubc_upl_commit_range error %d\n", (int)kr);
    }
    return kr;
}

errno_t
decmpfs_pagein_compressed(struct vnop_pagein_args *ap, int *is_compressed, decmpfs_cnode *cp)
{
    /* handles a page-in request from vfs for a compressed file */

    int err = 0;
    struct vnode *vp = ap->a_vp;
    upl_t pl = ap->a_pl;
    upl_offset_t pl_offset = ap->a_pl_offset;
    off_t f_offset = ap->a_f_offset;
    size_t size = ap->a_size;
    int flags = ap->a_flags;
    off_t uplPos = 0;
    user_ssize_t uplSize = 0;
    void *data = NULL;
    decmpfs_header *hdr = NULL;
    int abort_pagein = 0;
    uint64_t cachedSize = 0;
    int cmpdata_locked = 0;

    if(!decmpfs_trylock_compressed_data(cp, 0)) {
        return EAGAIN;
    }
    cmpdata_locked = 1;

    if (flags & ~(UPL_IOSYNC | UPL_NOCOMMIT | UPL_NORDAHEAD)) {
        DebugLog("pagein: unknown flags 0x%08x\n", (flags & ~(UPL_IOSYNC | UPL_NOCOMMIT | UPL_NORDAHEAD)));
    }

    err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0);
    if (err != 0) {
        goto out;
    }

    cachedSize = hdr->uncompressed_size;

    if (!compression_type_valid(hdr)) {
        /* compressor not registered */
        err = ENOTSUP;
        goto out;
    }

    /* map the upl so we can fetch into it */
    kern_return_t kr = ubc_upl_map(pl, (vm_offset_t*)&data);
    if ((kr != KERN_SUCCESS) || (data == NULL)) {
        goto out;
    }

    uplPos = f_offset;
    uplSize = size;

    /* clip the size to the size of the file */
    if ((uint64_t)uplPos + uplSize > cachedSize) {
        /* truncate the read to the size of the file */
        uplSize = cachedSize - uplPos;
    }

    /* do the fetch */
    decmpfs_vector vec;

decompress:
    /* the mapped data pointer points to the first page of the page list, so we want to start filling in at an offset of pl_offset */
    vec.buf = (char*)data + pl_offset;
    vec.size = size;

    uint64_t did_read = 0;
    if (decmpfs_fast_get_state(cp) == FILE_IS_CONVERTING) {
        ErrorLog("unexpected pagein during decompress\n");
        /*
         if the file is converting, this must be a recursive call to pagein from underneath a call to decmpfs_decompress_file;
         pretend that it succeeded but don't do anything since we're just going to write over the pages anyway
         */
        err = 0;
        did_read = 0;
    } else {
        err = decmpfs_fetch_uncompressed_data(vp, hdr, uplPos, uplSize, 1, &vec, &did_read);
    }
    if (err) {
        DebugLog("decmpfs_fetch_uncompressed_data err %d\n", err);
        int cmp_state = decmpfs_fast_get_state(cp);
        if (cmp_state == FILE_IS_CONVERTING) {
            DebugLog("cmp_state == FILE_IS_CONVERTING\n");
            cmp_state = wait_for_decompress(cp);
            if (cmp_state == FILE_IS_COMPRESSED) {
                DebugLog("cmp_state == FILE_IS_COMPRESSED\n");
                /* a decompress was attempted but it failed, let's try calling fetch again */
                goto decompress;
            }
        }
        if (cmp_state == FILE_IS_NOT_COMPRESSED) {
            DebugLog("cmp_state == FILE_IS_NOT_COMPRESSED\n");
            /* the file was decompressed after we started reading it */
            abort_pagein = 1;   /* we're not going to commit our data */
            *is_compressed = 0; /* instruct caller to fall back to its normal path */
        }
    }

    /* zero out whatever we didn't read, and zero out the end of the last page(s) */
    uint64_t total_size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
    if (did_read < total_size) {
        memset((char*)vec.buf + did_read, 0, total_size - did_read);
    }

    kr = ubc_upl_unmap(pl); data = NULL; /* make sure to set data to NULL so we don't try to unmap again below */
    if (kr != KERN_SUCCESS)
        ErrorLog("ubc_upl_unmap error %d\n", (int)kr);
    else {
        if (!abort_pagein) {
            /* commit our pages */
            kr = commit_upl(pl, pl_offset, total_size, UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_INACTIVATE, 0);
        }
    }

out:
    if (data) ubc_upl_unmap(pl);
    if (hdr) FREE(hdr, M_TEMP);
    if (cmpdata_locked) decmpfs_unlock_compressed_data(cp, 0);
    if (err)
        ErrorLog("err %d\n", err);

    return err;
}
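
/*
 * Pagein path in brief: take the cnode's compressed_data_lock shared (or bail
 * with EAGAIN), re-read the header, map the caller's UPL, ask the registered
 * compressor to fill it starting at pl_offset, zero whatever wasn't produced
 * up to the page-rounded size, then unmap and commit.  If the file was
 * decompressed out from under us, the pages are left uncommitted and
 * *is_compressed is cleared so the caller retries through its normal
 * (uncompressed) pagein path.
 */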

errno_t
decmpfs_read_compressed(struct vnop_read_args *ap, int *is_compressed, decmpfs_cnode *cp)
{
    /* handles a read request from vfs for a compressed file */

    uio_t uio = ap->a_uio;
    vnode_t vp = ap->a_vp;
    int err = 0;
    int countInt = 0;
    off_t uplPos = 0;
    user_ssize_t uplSize = 0;
    user_ssize_t uplRemaining = 0;
    off_t curUplPos = 0;
    user_ssize_t curUplSize = 0;
    kern_return_t kr = KERN_SUCCESS;
    int abort_read = 0;
    void *data = NULL;
    uint64_t did_read = 0;
    upl_t upl = NULL;
    upl_page_info_t *pli = NULL;
    decmpfs_header *hdr = NULL;
    uint64_t cachedSize = 0;
    off_t uioPos = 0;
    user_ssize_t uioRemaining = 0;
    int cmpdata_locked = 0;

    decmpfs_lock_compressed_data(cp, 0); cmpdata_locked = 1;

    uplPos = uio_offset(uio);
    uplSize = uio_resid(uio);
    VerboseLog("uplPos %lld uplSize %lld\n", uplPos, uplSize);

    cachedSize = decmpfs_cnode_get_vnode_cached_size(cp);

    if ((uint64_t)uplPos + uplSize > cachedSize) {
        /* truncate the read to the size of the file */
        uplSize = cachedSize - uplPos;
    }

    /* give the cluster layer a chance to fill in whatever it already has */
    countInt = (uplSize > INT_MAX) ? INT_MAX : uplSize;
    err = cluster_copy_ubc_data(vp, uio, &countInt, 0);
    if (err != 0)
        goto out;

    /* figure out what's left */
    uioPos = uio_offset(uio);
    uioRemaining = uio_resid(uio);
    if ((uint64_t)uioPos + uioRemaining > cachedSize) {
        /* truncate the read to the size of the file */
        uioRemaining = cachedSize - uioPos;
    }

    if (uioRemaining <= 0) {
        /* nothing left */
        goto out;
    }

    err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0);
    if (err != 0) {
        goto out;
    }
    if (!compression_type_valid(hdr)) {
        err = ENOTSUP;
        goto out;
    }

    uplPos = uioPos;
    uplSize = uioRemaining;
#if COMPRESSION_DEBUG
    char path[PATH_MAX];
    DebugLog("%s: uplPos %lld uplSize %lld\n", vnpath(vp, path, sizeof(path)), (uint64_t)uplPos, (uint64_t)uplSize);
#endif

    lck_rw_lock_shared(decompressorsLock);
    decmpfs_adjust_fetch_region_func adjust_fetch = decmp_get_func(hdr->compression_type, adjust_fetch);
    if (adjust_fetch) {
        /* give the compressor a chance to adjust the portion of the file that we read */
        adjust_fetch(vp, decmpfs_ctx, hdr, &uplPos, &uplSize);
        VerboseLog("adjusted uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize);
    }
    lck_rw_done(decompressorsLock);

    /* clip the adjusted size to the size of the file */
    if ((uint64_t)uplPos + uplSize > cachedSize) {
        /* truncate the read to the size of the file */
        uplSize = cachedSize - uplPos;
    }

    if (uplSize <= 0) {
        /* nothing left */
        goto out;
    }

    /*
     since we're going to create a upl for the given region of the file,
     make sure we're on page boundaries
     */

    if (uplPos & (PAGE_SIZE - 1)) {
        /* round position down to page boundary */
        uplSize += (uplPos & (PAGE_SIZE - 1));
        uplPos &= ~(PAGE_SIZE - 1);
    }
    /* round size up to page multiple */
    uplSize = (uplSize + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);

    VerboseLog("new uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize);

    uplRemaining = uplSize;
    curUplPos = uplPos;
    curUplSize = 0;

    while(uplRemaining > 0) {
        /* start after the last upl */
        curUplPos += curUplSize;

        /* clip to max upl size */
        curUplSize = uplRemaining;
        if (curUplSize > MAX_UPL_SIZE * PAGE_SIZE) {
            curUplSize = MAX_UPL_SIZE * PAGE_SIZE;
        }

        /* create the upl */
        kr = ubc_create_upl(vp, curUplPos, curUplSize, &upl, &pli, UPL_SET_LITE);
        if (kr != KERN_SUCCESS) {
            ErrorLog("ubc_create_upl error %d\n", (int)kr);
            err = EINVAL;
            goto out;
        }
        VerboseLog("curUplPos %lld curUplSize %lld\n", (uint64_t)curUplPos, (uint64_t)curUplSize);

        /* map the upl */
        kr = ubc_upl_map(upl, (vm_offset_t*)&data);
        if (kr != KERN_SUCCESS) {
            ErrorLog("ubc_upl_map error %d\n", (int)kr);
            err = EINVAL;
            goto out;
        }

        /* make sure the map succeeded */
        if (!data) {
            ErrorLog("ubc_upl_map mapped null\n");
            err = EINVAL;
            goto out;
        }

        /* fetch uncompressed data into the mapped upl */
        decmpfs_vector vec;
decompress:
        vec = (decmpfs_vector){ .buf = data, .size = curUplSize };
        err = decmpfs_fetch_uncompressed_data(vp, hdr, curUplPos, curUplSize, 1, &vec, &did_read);
        if (err) {
            ErrorLog("decmpfs_fetch_uncompressed_data err %d\n", err);

            /* maybe the file is converting to decompressed */
            int cmp_state = decmpfs_fast_get_state(cp);
            if (cmp_state == FILE_IS_CONVERTING) {
                ErrorLog("cmp_state == FILE_IS_CONVERTING\n");
                cmp_state = wait_for_decompress(cp);
                if (cmp_state == FILE_IS_COMPRESSED) {
                    ErrorLog("cmp_state == FILE_IS_COMPRESSED\n");
                    /* a decompress was attempted but it failed, let's try fetching again */
                    goto decompress;
                }
            }
            if (cmp_state == FILE_IS_NOT_COMPRESSED) {
                ErrorLog("cmp_state == FILE_IS_NOT_COMPRESSED\n");
                /* the file was decompressed after we started reading it */
                abort_read = 1;     /* we're not going to commit our data */
                *is_compressed = 0; /* instruct caller to fall back to its normal path */
            }
            kr = KERN_FAILURE;
            did_read = 0;
        }
        /* zero out the remainder of the last page */
        memset((char*)data + did_read, 0, curUplSize - did_read);
        kr = ubc_upl_unmap(upl);
        if (kr == KERN_SUCCESS) {
            if (abort_read) {
                kr = commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1);
            } else {
                VerboseLog("uioPos %lld uioRemaining %lld\n", (uint64_t)uioPos, (uint64_t)uioRemaining);
                if (uioRemaining) {
                    off_t uplOff = uioPos - curUplPos;
                    if (uplOff < 0) {
                        ErrorLog("uplOff %lld should never be negative\n", (int64_t)uplOff);
                        err = EINVAL;
                    } else {
                        off_t count = curUplPos + curUplSize - uioPos;
                        if (count < 0) {
                            /* this upl is entirely before the uio */
                        } else {
                            if (count > uioRemaining)
                                count = uioRemaining;
                            int io_resid = count;
                            err = cluster_copy_upl_data(uio, upl, uplOff, &io_resid);
                            int copied = count - io_resid;
                            VerboseLog("uplOff %lld count %lld copied %lld\n", (uint64_t)uplOff, (uint64_t)count, (uint64_t)copied);
                            if (err) {
                                ErrorLog("cluster_copy_upl_data err %d\n", err);
                            }
                            uioPos += copied;
                            uioRemaining -= copied;
                        }
                    }
                }
                kr = commit_upl(upl, 0, curUplSize, UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_INACTIVATE, 0);
                if (err) {
                    goto out;
                }
            }
        } else {
            ErrorLog("ubc_upl_unmap error %d\n", (int)kr);
        }

        uplRemaining -= curUplSize;
    }

out:
    if (hdr) FREE(hdr, M_TEMP);
    if (cmpdata_locked) decmpfs_unlock_compressed_data(cp, 0);
    if (err) { /* something went wrong */
        ErrorLog("err %d\n", err);
        return err;
    }

#if COMPRESSION_DEBUG
    uplSize = uio_resid(uio);
    if (uplSize)
        VerboseLog("still %lld bytes to copy\n", uplSize);
#endif
    return 0;
}
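
/*
 * Read path in brief: first let cluster_copy_ubc_data() satisfy whatever is
 * already resident in the UBC; then, for the remainder, let the compressor
 * optionally widen the fetch region (adjust_fetch), round the region out to
 * page boundaries, and walk it in UPLs of at most MAX_UPL_SIZE pages: create,
 * map, fetch, zero the tail, unmap, copy the overlapping piece into the
 * caller's uio with cluster_copy_upl_data(), and commit so the decompressed
 * pages remain valid in the UBC for later reads.
 */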

int
decmpfs_free_compressed_data(vnode_t vp, decmpfs_cnode *cp)
{
    /*
     call out to the decompressor to free any data associated with this compressed file,
     then delete the file's compression xattr
     */

    decmpfs_header *hdr = NULL;
    int err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0);
    if (err) {
        ErrorLog("decmpfs_fetch_compressed_header err %d\n", err);
    } else {
        lck_rw_lock_shared(decompressorsLock);
        decmpfs_free_compressed_data_func free_data = decmp_get_func(hdr->compression_type, free_data);
        if (free_data) {
            err = free_data(vp, decmpfs_ctx, hdr);
        } else {
            /* nothing to do, so no error */
            err = 0;
        }
        lck_rw_done(decompressorsLock);

        if (err != 0) {
            ErrorLog("decompressor err %d\n", err);
        }
    }

    /* delete the xattr */
    err = vn_removexattr(vp, DECMPFS_XATTR_NAME, 0, decmpfs_ctx);
    if (err != 0) {
        goto out;
    }

out:
    if (hdr) FREE(hdr, M_TEMP);
    return err;
}

#pragma mark --- file conversion routines ---

static int
unset_compressed_flag(vnode_t vp)
{
    int err = 0;
    struct vnode_attr va;
    int new_bsdflags = 0;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_flags);
    err = vnode_getattr(vp, &va, decmpfs_ctx);

    if (err != 0) {
        ErrorLog("vnode_getattr err %d\n", err);
    } else {
        new_bsdflags = va.va_flags & ~UF_COMPRESSED;

        VATTR_INIT(&va);
        VATTR_SET(&va, va_flags, new_bsdflags);
        err = vnode_setattr(vp, &va, decmpfs_ctx);
        if (err != 0) {
            ErrorLog("vnode_setattr err %d\n", err);
        }
    }
    return err;
}
int
decmpfs_decompress_file(vnode_t vp, decmpfs_cnode *cp, off_t toSize, int truncate_okay, int skiplock)
{
    /* convert a compressed file to an uncompressed file */

    int err = 0;
    char *data = NULL;
    uio_t uio_w = 0;
    off_t offset = 0;
    uint32_t old_state = 0;
    uint32_t new_state = 0;
    int update_file_state = 0;
    int allocSize = 0;
    decmpfs_header *hdr = NULL;
    int cmpdata_locked = 0;
    off_t remaining = 0;
    uint64_t uncompressed_size = 0;

    if (!skiplock) {
        decmpfs_lock_compressed_data(cp, 1); cmpdata_locked = 1;
    }

decompress:
    old_state = decmpfs_fast_get_state(cp);

    switch(old_state) {
        case FILE_IS_NOT_COMPRESSED:
        {
            /* someone else decompressed the file */
            err = 0;
            goto out;
        }

        case FILE_TYPE_UNKNOWN:
        {
            /* the file is in an unknown state, so update the state and retry */
            (void)decmpfs_file_is_compressed(vp, cp);

            /* try again */
            goto decompress;
        }

        case FILE_IS_COMPRESSED:
        {
            /* the file is compressed, so decompress it */
            break;
        }

        default:
        {
            /*
             this shouldn't happen since multiple calls to decmpfs_decompress_file lock each other out,
             and when decmpfs_decompress_file returns, the state should always be set back to
             FILE_IS_NOT_COMPRESSED or FILE_TYPE_UNKNOWN
             */
            err = EINVAL;
            goto out;
        }
    }

    err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0);
    if (err != 0) {
        goto out;
    }

    uncompressed_size = hdr->uncompressed_size;
    if (toSize == -1)
        toSize = hdr->uncompressed_size;

    if (toSize == 0) {
        /* special case truncating the file to zero bytes */
        goto nodecmp;
    } else if ((uint64_t)toSize > hdr->uncompressed_size) {
        /* the caller is trying to grow the file, so we should decompress all the data */
        toSize = hdr->uncompressed_size;
    }

    allocSize = MIN(64*1024, toSize);
    MALLOC(data, char *, allocSize, M_TEMP, M_WAITOK);
    if (!data) {
        err = ENOMEM;
        goto out;
    }

    uio_w = uio_create(1, 0LL, UIO_SYSSPACE, UIO_WRITE);
    if (!uio_w) {
        err = ENOMEM;
        goto out;
    }
    uio_w->uio_flags |= UIO_FLAGS_IS_COMPRESSED_FILE;

    remaining = toSize;

    /* tell the buffer cache that this is an empty file */
    ubc_setsize(vp, 0);

    /* if we got here, we need to decompress the file */
    decmpfs_cnode_set_vnode_state(cp, FILE_IS_CONVERTING, 1);

    while(remaining > 0) {
        /* loop decompressing data from the file and writing it into the data fork */

        uint64_t bytes_read = 0;
        decmpfs_vector vec = { .buf = data, .size = MIN(allocSize, remaining) };
        err = decmpfs_fetch_uncompressed_data(vp, hdr, offset, vec.size, 1, &vec, &bytes_read);
        if (err != 0) {
            ErrorLog("decmpfs_fetch_uncompressed_data err %d\n", err);
            goto out;
        }

        if (bytes_read == 0) {
            /* we're done reading data */
            break;
        }

        uio_reset(uio_w, offset, UIO_SYSSPACE, UIO_WRITE);
        err = uio_addiov(uio_w, CAST_USER_ADDR_T(data), bytes_read);
        if (err != 0) {
            ErrorLog("uio_addiov err %d\n", err);
            err = ENOMEM;
            goto out;
        }

        err = VNOP_WRITE(vp, uio_w, 0, decmpfs_ctx);
        if (err != 0) {
            /* if the write failed, truncate the file to zero bytes */
            ErrorLog("VNOP_WRITE err %d\n", err);
            break;
        }
        offset += bytes_read;
        remaining -= bytes_read;
    }

    if (err == 0) {
        if (offset != toSize) {
            ErrorLog("file decompressed to %lld instead of %lld\n", offset, toSize);
            err = EINVAL;
            goto out;
        }
    }

    if (err == 0) {
        /* sync the data and metadata */
        err = VNOP_FSYNC(vp, MNT_WAIT, decmpfs_ctx);
        if (err != 0) {
            ErrorLog("VNOP_FSYNC err %d\n", err);
            goto out;
        }
    }

    if (err != 0) {
        /* write, setattr, or fsync failed */
        ErrorLog("aborting decompress, err %d\n", err);
        if (truncate_okay) {
            /* truncate anything we might have written */
            int error = vnode_setsize(vp, 0, 0, decmpfs_ctx);
            ErrorLog("vnode_setsize err %d\n", error);
        }
        goto out;
    }

nodecmp:
    /* if we're truncating the file to zero bytes, we'll skip ahead to here */

    /* unset the compressed flag */
    unset_compressed_flag(vp);

    /* free the compressed data associated with this file */
    err = decmpfs_free_compressed_data(vp, cp);
    if (err != 0) {
        ErrorLog("decmpfs_free_compressed_data err %d\n", err);
    }

    /*
     even if free_compressed_data or vnode_getattr/vnode_setattr failed, return success
     since we succeeded in writing all of the file data to the data fork
     */
    err = 0;

    /* if we got this far, the file was successfully decompressed */
    update_file_state = 1;
    new_state = FILE_IS_NOT_COMPRESSED;

#if COMPRESSION_DEBUG
    {
        uint64_t filesize = 0;
        vnsize(vp, &filesize);
        DebugLog("new file size %lld\n", filesize);
    }
#endif

out:
    if (hdr) FREE(hdr, M_TEMP);
    if (data) FREE(data, M_TEMP);
    if (uio_w) uio_free(uio_w);

    if (err != 0) {
        /* if there was a failure, reset compression flags to unknown and clear the buffer cache data */
        update_file_state = 1;
        new_state = FILE_TYPE_UNKNOWN;
        if (uncompressed_size) {
            ubc_setsize(vp, 0);
            ubc_setsize(vp, uncompressed_size);
        }
    }

    if (update_file_state) {
        lck_mtx_lock(decompress_channel_mtx);
        decmpfs_cnode_set_vnode_state(cp, new_state, 1);
        wakeup((caddr_t)&decompress_channel); /* wake up anyone who might have been waiting for decompression */
        lck_mtx_unlock(decompress_channel_mtx);
    }

    if (cmpdata_locked) decmpfs_unlock_compressed_data(cp, 1);

    return err;
}
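
/*
 * Conversion path in brief: with the cnode's data lock held exclusive, the
 * file is marked FILE_IS_CONVERTING, the uncompressed bytes are pulled from
 * the registered compressor in allocSize (at most 64 KiB) chunks and written
 * into the data fork with VNOP_WRITE, then fsynced.  On success the
 * UF_COMPRESSED flag and the decmpfs xattr are removed and the state becomes
 * FILE_IS_NOT_COMPRESSED; on failure it reverts to FILE_TYPE_UNKNOWN and the
 * UBC size is restored.  Either way, waiters blocked in wait_for_decompress()
 * are woken via decompress_channel.
 */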

#pragma mark --- Type1 compressor ---

/*
 The "Type1" compressor stores the data fork directly in the compression xattr
 */

static int
decmpfs_validate_compressed_file_Type1(__unused vnode_t vp, __unused vfs_context_t ctx, decmpfs_header *hdr)
{
    int err = 0;

    if (hdr->uncompressed_size + sizeof(decmpfs_disk_header) != (uint64_t)hdr->attr_size) {
        err = EINVAL;
        goto out;
    }
out:
    return err;
}

static int
decmpfs_fetch_uncompressed_data_Type1(__unused vnode_t vp, __unused vfs_context_t ctx, decmpfs_header *hdr, off_t offset, user_ssize_t size, int nvec, decmpfs_vector *vec, uint64_t *bytes_read)
{
    int err = 0;
    int i;
    user_ssize_t remaining;

    if (hdr->uncompressed_size + sizeof(decmpfs_disk_header) != (uint64_t)hdr->attr_size) {
        err = EINVAL;
        goto out;
    }

#if COMPRESSION_DEBUG
    static int dummy = 0; // prevent syslog from coalescing printfs
    char path[PATH_MAX];
    DebugLog("%s: %d memcpy %lld at %lld\n", vnpath(vp, path, sizeof(path)), dummy++, size, (uint64_t)offset);
#endif

    remaining = size;
    for (i = 0; (i < nvec) && (remaining > 0); i++) {
        user_ssize_t curCopy = vec[i].size;
        if (curCopy > remaining)
            curCopy = remaining;
        memcpy(vec[i].buf, hdr->attr_bytes + offset, curCopy);
        offset += curCopy;
        remaining -= curCopy;
    }

    if ((bytes_read) && (err == 0))
        *bytes_read = (size - remaining);

out:
    return err;
}

static decmpfs_registration Type1Reg =
{
    .decmpfs_registration = DECMPFS_REGISTRATION_VERSION,
    .validate = decmpfs_validate_compressed_file_Type1,
    .adjust_fetch = NULL, /* no adjust necessary */
    .fetch = decmpfs_fetch_uncompressed_data_Type1,
    .free_data = NULL /* no free necessary */
};
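
/*
 * Type1 is the degenerate "compressor": the payload after the
 * decmpfs_disk_header in the xattr *is* the file's data fork, stored
 * uncompressed, so validate just checks that attr_size equals
 * sizeof(decmpfs_disk_header) plus the uncompressed size, and fetch is a
 * bounds-limited memcpy out of hdr->attr_bytes.  In practice it only covers
 * files whose data fits within MAX_DECMPFS_XATTR_SIZE.
 */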

#pragma mark --- decmpfs initialization ---

void decmpfs_init()
{
    static int done = 0;
    if (done) return;

    decmpfs_ctx = vfs_context_create(vfs_context_kernel());

    lck_grp_attr_t *attr = lck_grp_attr_alloc_init();
    decmpfs_lockgrp = lck_grp_alloc_init("VFSCOMP", attr);
    decompressorsLock = lck_rw_alloc_init(decmpfs_lockgrp, NULL);
    decompress_channel_mtx = lck_mtx_alloc_init(decmpfs_lockgrp, NULL);

    register_decmpfs_decompressor(CMP_Type1, &Type1Reg);

    done = 1;
}
#endif /* HFS_COMPRESSION */