/*
 * Copyright (c) 2008-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#if !FS_COMPRESSION

/* We need these symbols even though compression is turned off */

#define UNUSED_SYMBOL(x) asm(".global _" #x "\n.set _" #x ", 0\n");
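/*
 * Each UNUSED_SYMBOL() expansion emits assembler directives that export the
 * named symbol and bind it to the absolute value 0, so code that links against
 * the decmpfs entry points still resolves when FS_COMPRESSION is disabled.
 */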
UNUSED_SYMBOL(register_decmpfs_decompressor)
UNUSED_SYMBOL(unregister_decmpfs_decompressor)
UNUSED_SYMBOL(decmpfs_init)
UNUSED_SYMBOL(decmpfs_read_compressed)
UNUSED_SYMBOL(decmpfs_cnode_cmp_type)
UNUSED_SYMBOL(decmpfs_cnode_get_vnode_state)
UNUSED_SYMBOL(decmpfs_cnode_get_vnode_cached_size)
UNUSED_SYMBOL(decmpfs_cnode_get_vnode_cached_nchildren)
UNUSED_SYMBOL(decmpfs_cnode_get_vnode_cached_total_size)
UNUSED_SYMBOL(decmpfs_lock_compressed_data)
UNUSED_SYMBOL(decmpfs_cnode_free)
UNUSED_SYMBOL(decmpfs_cnode_alloc)
UNUSED_SYMBOL(decmpfs_cnode_destroy)
UNUSED_SYMBOL(decmpfs_decompress_file)
UNUSED_SYMBOL(decmpfs_unlock_compressed_data)
UNUSED_SYMBOL(decmpfs_cnode_init)
UNUSED_SYMBOL(decmpfs_cnode_set_vnode_state)
UNUSED_SYMBOL(decmpfs_hides_xattr)
UNUSED_SYMBOL(decmpfs_ctx)
UNUSED_SYMBOL(decmpfs_file_is_compressed)
UNUSED_SYMBOL(decmpfs_update_attributes)
UNUSED_SYMBOL(decmpfs_hides_rsrc)
UNUSED_SYMBOL(decmpfs_pagein_compressed)
UNUSED_SYMBOL(decmpfs_validate_compressed_file)
#else /* FS_COMPRESSION */
#include <sys/kernel.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/fcntl.h>
#include <sys/xattr.h>
#include <sys/namei.h>
#include <sys/mount_internal.h>
#include <sys/decmpfs.h>
#include <sys/uio_internal.h>
#include <libkern/OSByteOrder.h>
#include <libkern/section_keywords.h>
#pragma mark --- debugging ---

#define COMPRESSION_DEBUG 0
#define COMPRESSION_DEBUG_VERBOSE 0
#define MALLOC_DEBUG 0
static const char *
baseName(const char *path)
{
	const char *ret = path;
	int i;
	for (i = 0; path[i] != 0; i++) {
		if (path[i] == '/') {
			ret = &path[i + 1];
		}
	}
	return ret;
}
#if COMPRESSION_DEBUG
static char *
vnpath(vnode_t vp, char *path, int len)
{
	int origlen = len;
	path[0] = 0;
	vn_getpath(vp, path, &len);
	path[origlen - 1] = 0;
	return path;
}
#endif /* COMPRESSION_DEBUG */
#define ErrorLog(x, args...) printf("%s:%d:%s: " x, baseName(__FILE__), __LINE__, __FUNCTION__, ## args)
#if COMPRESSION_DEBUG
#define ErrorLogWithPath(x, args...) do { char *path; MALLOC(path, char *, PATH_MAX, M_TEMP, M_WAITOK); printf("%s:%d:%s: %s: " x, baseName(__FILE__), __LINE__, __FUNCTION__, vnpath(vp, path, PATH_MAX), ## args); FREE(path, M_TEMP); } while(0)
#else
#define ErrorLogWithPath(x, args...) do { (void*)vp; printf("%s:%d:%s: %s: " x, baseName(__FILE__), __LINE__, __FUNCTION__, "<private>", ## args); } while(0)
#endif
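/*
 * Note: the ...WithPath variants expand a reference to a local `vp`, so they
 * can only be used inside functions that have a vnode_t named vp in scope.
 */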
#if COMPRESSION_DEBUG
#define DebugLog ErrorLog
#define DebugLogWithPath ErrorLogWithPath
#else
#define DebugLog(x...) do { } while(0)
#define DebugLogWithPath(x...) do { } while(0)
#endif

#if COMPRESSION_DEBUG_VERBOSE
#define VerboseLog ErrorLog
#define VerboseLogWithPath ErrorLogWithPath
#else
#define VerboseLog(x...) do { } while(0)
#define VerboseLogWithPath(x...) do { } while(0)
#endif
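/*
 * When MALLOC_DEBUG is enabled, the MALLOC/FREE macros below are replaced with
 * wrappers that bracket every allocation with guard records (magic 0xdadadada),
 * track the running total in totalAlloc, and panic on detected corruption.
 */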
#if MALLOC_DEBUG

static SInt32 totalAlloc;

typedef struct {
	uint32_t allocSz;
	uint32_t magic;
	const char *file;
	int line;
} allocated;

static void *
_malloc(uint32_t sz, __unused int type, __unused int flags, const char *file, int line)
{
	uint32_t allocSz = sz + 2 * sizeof(allocated);

	allocated *alloc = NULL;
	MALLOC(alloc, allocated *, allocSz, type, flags);
	if (!alloc) {
		ErrorLog("malloc failed\n");
		return NULL;
	}

	char *ret = (char *)&alloc[1];
	allocated *alloc2 = (allocated *)(ret + sz);

	alloc->allocSz = allocSz;
	alloc->magic = 0xdadadada;
	alloc->file = file;
	alloc->line = line;
	*alloc2 = *alloc;

	int s = OSAddAtomic(sz, &totalAlloc);
	ErrorLog("malloc(%d) -> %p, total allocations %d\n", sz, ret, s + sz);

	return ret;
}

static void
_free(char *ret, __unused int type, const char *file, int line)
{
	if (!ret) {
		ErrorLog("freeing null\n");
		return;
	}
	allocated *alloc = (allocated *)ret;
	alloc--;
	uint32_t sz = alloc->allocSz - 2 * sizeof(allocated);
	allocated *alloc2 = (allocated *)(ret + sz);

	if (alloc->magic != 0xdadadada) {
		panic("freeing bad pointer");
	}

	if (memcmp(alloc, alloc2, sizeof(*alloc)) != 0) {
		panic("clobbered data");
	}

	memset(ret, 0xce, sz);

	int s = OSAddAtomic(-sz, &totalAlloc);
	ErrorLog("free(%p,%d) -> total allocations %d\n", ret, sz, s - sz);

	FREE(alloc, type);
}

#define MALLOC(space, cast, size, type, flags) (space) = (cast)_malloc(size, type, flags, __FILE__, __LINE__)
#define FREE(addr, type) _free((void *)addr, type, __FILE__, __LINE__)

#endif /* MALLOC_DEBUG */
#pragma mark --- globals ---

static lck_grp_t *decmpfs_lockgrp;

static const decmpfs_registration *decompressors[CMP_MAX]; /* the registered compressors */
static lck_rw_t *decompressorsLock;
static int decompress_channel; /* channel used by decompress_file to wake up waiters */
static lck_mtx_t *decompress_channel_mtx;

vfs_context_t decmpfs_ctx;
#pragma mark --- decmp_get_func ---

#define offsetof_func(func) ((uintptr_t)offsetof(decmpfs_registration, func))
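/*
 * Decompressor callbacks are looked up by their byte offset within the
 * decmpfs_registration struct.  _func_from_offset() bounds-checks the offset
 * against the registration version (V1 and V3 structs expose different
 * callbacks) and re-signs the pointer-authenticated function pointer for the
 * caller on targets that use ptrauth.
 */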
static void *
_func_from_offset(uint32_t type, uintptr_t offset, uint32_t discriminator)
{
	/* get the function at the given offset in the registration for the given type */
	const decmpfs_registration *reg = decompressors[type];

	switch (reg->decmpfs_registration) {
	case DECMPFS_REGISTRATION_VERSION_V1:
		if (offset > offsetof_func(free_data)) {
			return NULL;
		}
		break;
	case DECMPFS_REGISTRATION_VERSION_V3:
		if (offset > offsetof_func(get_flags)) {
			return NULL;
		}
		break;
	default:
		return NULL;
	}

	void *ptr = *(void * const *)((const void *)reg + offset);
	if (ptr != NULL) {
		/* Resign as a function-in-void* */
		ptr = ptrauth_auth_and_resign(ptr, ptrauth_key_asia, discriminator, ptrauth_key_asia, 0);
	}
	return ptr;
}
extern void IOServicePublishResource(const char * property, boolean_t value);
extern boolean_t IOServiceWaitForMatchingResource(const char * property, uint64_t timeout);
extern boolean_t IOCatalogueMatchingDriversPresent(const char * property);
static void *
_decmp_get_func(vnode_t vp, uint32_t type, uintptr_t offset, uint32_t discriminator)
{
	/*
	 * this function should be called while holding a shared lock to decompressorsLock,
	 * and will return with the lock held
	 */

	if (type >= CMP_MAX) {
		return NULL;
	}

	if (decompressors[type] != NULL) {
		// the compressor has already registered but the function might be null
		return _func_from_offset(type, offset, discriminator);
	}

	// does IOKit know about a kext that is supposed to provide this type?
	char providesName[80];
	snprintf(providesName, sizeof(providesName), "com.apple.AppleFSCompression.providesType%u", type);
	if (IOCatalogueMatchingDriversPresent(providesName)) {
		// there is a kext that says it will register for this type, so let's wait for it
		char resourceName[80];
		uint64_t delay = 10000000ULL; // 10 milliseconds.
		snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", type);
		ErrorLogWithPath("waiting for %s\n", resourceName);
		while (decompressors[type] == NULL) {
			lck_rw_unlock_shared(decompressorsLock); // we have to unlock to allow the kext to register
			if (IOServiceWaitForMatchingResource(resourceName, delay)) {
				lck_rw_lock_shared(decompressorsLock);
				break;
			}
			if (!IOCatalogueMatchingDriversPresent(providesName)) {
				ErrorLogWithPath("the kext with %s is no longer present\n", providesName);
				lck_rw_lock_shared(decompressorsLock);
				break;
			}
			ErrorLogWithPath("still waiting for %s\n", resourceName);
			lck_rw_lock_shared(decompressorsLock);
		}
		// IOKit says the kext is loaded, so it should be registered too!
		if (decompressors[type] == NULL) {
			ErrorLogWithPath("we found %s, but the type still isn't registered\n", providesName);
			return NULL;
		}
		// it's now registered, so let's return the function
		return _func_from_offset(type, offset, discriminator);
	}

	// the compressor hasn't registered, so it never will unless someone manually kextloads it
	ErrorLogWithPath("tried to access a compressed file of unregistered type %d\n", type);
	return NULL;
}
#define decmp_get_func(vp, type, func) (typeof(decompressors[0]->func))_decmp_get_func(vp, type, offsetof_func(func), ptrauth_function_pointer_type_discriminator(typeof(decompressors[0]->func)))
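/*
 * Typical use, as in decmpfs_fetch_uncompressed_data() below:
 *
 *     decmpfs_fetch_uncompressed_data_func fetch =
 *         decmp_get_func(vp, hdr->compression_type, fetch);
 *
 * The macro derives both the struct offset and the ptrauth discriminator from
 * the named registration field.
 */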
#pragma mark --- utilities ---

#if COMPRESSION_DEBUG
static int
vnsize(vnode_t vp, uint64_t *size)
{
	struct vnode_attr va;
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);
	int error = vnode_getattr(vp, &va, decmpfs_ctx);
	if (error != 0) {
		ErrorLogWithPath("vnode_getattr err %d\n", error);
		return error;
	}
	*size = va.va_data_size;
	return 0;
}
#endif /* COMPRESSION_DEBUG */
#pragma mark --- cnode routines ---

ZONE_DECLARE(decmpfs_cnode_zone, "decmpfs_cnode",
    sizeof(struct decmpfs_cnode), ZC_NONE);

decmpfs_cnode *
decmpfs_cnode_alloc(void)
{
	return zalloc(decmpfs_cnode_zone);
}

void
decmpfs_cnode_free(decmpfs_cnode *dp)
{
	zfree(decmpfs_cnode_zone, dp);
}

void
decmpfs_cnode_init(decmpfs_cnode *cp)
{
	memset(cp, 0, sizeof(*cp));
	lck_rw_init(&cp->compressed_data_lock, decmpfs_lockgrp, NULL);
}

void
decmpfs_cnode_destroy(decmpfs_cnode *cp)
{
	lck_rw_destroy(&cp->compressed_data_lock, decmpfs_lockgrp);
}
bool
decmpfs_trylock_compressed_data(decmpfs_cnode *cp, int exclusive)
{
	void *thread = current_thread();
	bool retval = false;

	if (cp->lockowner == thread) {
		/* this thread is already holding an exclusive lock, so bump the count */
		cp->lockcount++;
		retval = true;
	} else if (exclusive) {
		if ((retval = lck_rw_try_lock_exclusive(&cp->compressed_data_lock))) {
			cp->lockowner = thread;
			cp->lockcount = 1;
		}
	} else {
		if ((retval = lck_rw_try_lock_shared(&cp->compressed_data_lock))) {
			cp->lockowner = (void *)-1;
		}
	}
	return retval;
}

void
decmpfs_lock_compressed_data(decmpfs_cnode *cp, int exclusive)
{
	void *thread = current_thread();

	if (cp->lockowner == thread) {
		/* this thread is already holding an exclusive lock, so bump the count */
		cp->lockcount++;
	} else if (exclusive) {
		lck_rw_lock_exclusive(&cp->compressed_data_lock);
		cp->lockowner = thread;
		cp->lockcount = 1;
	} else {
		lck_rw_lock_shared(&cp->compressed_data_lock);
		cp->lockowner = (void *)-1;
	}
}

void
decmpfs_unlock_compressed_data(decmpfs_cnode *cp, __unused int exclusive)
{
	void *thread = current_thread();

	if (cp->lockowner == thread) {
		/* this thread is holding an exclusive lock, so decrement the count */
		if ((--cp->lockcount) > 0) {
			/* the caller still has outstanding locks, so we're done */
			return;
		}
		cp->lockowner = NULL;
	}

	lck_rw_done(&cp->compressed_data_lock);
}
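/*
 * The compressed-data lock supports recursive exclusive acquisition: lockowner
 * records the owning thread and lockcount its nesting depth, while shared
 * holders set lockowner to the sentinel (void *)-1, so a thread that already
 * holds the lock exclusively can re-enter these routines without deadlocking.
 */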
uint32_t
decmpfs_cnode_get_vnode_state(decmpfs_cnode *cp)
{
	return cp->cmp_state;
}

void
decmpfs_cnode_set_vnode_state(decmpfs_cnode *cp, uint32_t state, int skiplock)
{
	if (!skiplock) {
		decmpfs_lock_compressed_data(cp, 1);
	}
	cp->cmp_state = (uint8_t)state;
	if (state == FILE_TYPE_UNKNOWN) {
		/* clear out the compression type too */
		cp->cmp_type = 0;
	}
	if (!skiplock) {
		decmpfs_unlock_compressed_data(cp, 1);
	}
}

static void
decmpfs_cnode_set_vnode_cmp_type(decmpfs_cnode *cp, uint32_t cmp_type, int skiplock)
{
	if (!skiplock) {
		decmpfs_lock_compressed_data(cp, 1);
	}
	cp->cmp_type = cmp_type;
	if (!skiplock) {
		decmpfs_unlock_compressed_data(cp, 1);
	}
}

static void
decmpfs_cnode_set_vnode_minimal_xattr(decmpfs_cnode *cp, int minimal_xattr, int skiplock)
{
	if (!skiplock) {
		decmpfs_lock_compressed_data(cp, 1);
	}
	cp->cmp_minimal_xattr = !!minimal_xattr;
	if (!skiplock) {
		decmpfs_unlock_compressed_data(cp, 1);
	}
}

uint64_t
decmpfs_cnode_get_vnode_cached_size(decmpfs_cnode *cp)
{
	return cp->uncompressed_size;
}

uint64_t
decmpfs_cnode_get_vnode_cached_nchildren(decmpfs_cnode *cp)
{
	return cp->nchildren;
}

uint64_t
decmpfs_cnode_get_vnode_cached_total_size(decmpfs_cnode *cp)
{
	return cp->total_size;
}

void
decmpfs_cnode_set_vnode_cached_size(decmpfs_cnode *cp, uint64_t size)
{
	while (1) {
		uint64_t old = cp->uncompressed_size;
		if (OSCompareAndSwap64(old, size, (UInt64 *)&cp->uncompressed_size)) {
			break;
		} else {
			/* failed to write our value, so loop */
		}
	}
}

void
decmpfs_cnode_set_vnode_cached_nchildren(decmpfs_cnode *cp, uint64_t nchildren)
{
	while (1) {
		uint64_t old = cp->nchildren;
		if (OSCompareAndSwap64(old, nchildren, (UInt64 *)&cp->nchildren)) {
			break;
		} else {
			/* failed to write our value, so loop */
		}
	}
}

void
decmpfs_cnode_set_vnode_cached_total_size(decmpfs_cnode *cp, uint64_t total_sz)
{
	while (1) {
		uint64_t old = cp->total_size;
		if (OSCompareAndSwap64(old, total_sz, (UInt64 *)&cp->total_size)) {
			break;
		} else {
			/* failed to write our value, so loop */
		}
	}
}

uint64_t
decmpfs_cnode_get_decompression_flags(decmpfs_cnode *cp)
{
	return cp->decompression_flags;
}

void
decmpfs_cnode_set_decompression_flags(decmpfs_cnode *cp, uint64_t flags)
{
	while (1) {
		uint64_t old = cp->decompression_flags;
		if (OSCompareAndSwap64(old, flags, (UInt64 *)&cp->decompression_flags)) {
			break;
		} else {
			/* failed to write our value, so loop */
		}
	}
}

uint32_t
decmpfs_cnode_cmp_type(decmpfs_cnode *cp)
{
	return cp->cmp_type;
}
#pragma mark --- decmpfs state routines ---
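/*
 * decmpfs_fetch_compressed_header() below produces a decmpfs_header, which is
 * the on-disk decmpfs_disk_header (compression_magic, compression_type,
 * uncompressed_size, plus any compressor payload) prefixed with the in-memory
 * attr_size field.  When the cnode already caches the compression type and the
 * original xattr was minimal, the header is synthesized without re-reading the
 * xattr from disk.
 */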
static int
decmpfs_fetch_compressed_header(vnode_t vp, decmpfs_cnode *cp, decmpfs_header **hdrOut, int returnInvalid)
{
	/*
	 * fetches vp's compression xattr, converting it into a decmpfs_header; returns 0 or errno
	 * if returnInvalid == 1, returns the header even if the type was invalid (out of range),
	 * and return ERANGE in that case
	 */

	int err = 0;
	char *data = NULL;
	size_t read_size = 0;
	size_t attr_size = 0;
	uio_t attr_uio = NULL;
	const bool no_additional_data = ((cp != NULL)
	    && (cp->cmp_type != 0)
	    && (cp->cmp_minimal_xattr != 0));
	char uio_buf[UIO_SIZEOF(1)];
	decmpfs_header *hdr = NULL;

	/*
	 * Trace the following parameters on entry with event-id 0x03120004
	 *
	 * @vp->v_id: vnode-id for which to fetch compressed header.
	 * @no_additional_data: If set true then xattr didn't have any extra data.
	 * @returnInvalid: return the header even though the type is out of range.
	 */
	DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FETCH_COMPRESSED_HEADER, vp->v_id,
	    no_additional_data, returnInvalid);

	if (no_additional_data) {
		/* this file's xattr didn't have any extra data when we fetched it, so we can synthesize a header from the data in the cnode */

		MALLOC(data, char *, sizeof(decmpfs_header), M_TEMP, M_WAITOK);
		hdr = (decmpfs_header *)data;
		hdr->attr_size = sizeof(decmpfs_disk_header);
		hdr->compression_magic = DECMPFS_MAGIC;
		hdr->compression_type = cp->cmp_type;
		if (hdr->compression_type == DATALESS_PKG_CMPFS_TYPE) {
			if (!vnode_isdir(vp)) {
				err = EINVAL;
				goto out;
			}
			hdr->_size.value = DECMPFS_PKG_VALUE_FROM_SIZE_COUNT(
				decmpfs_cnode_get_vnode_cached_size(cp),
				decmpfs_cnode_get_vnode_cached_nchildren(cp));
		} else if (vnode_isdir(vp)) {
			hdr->_size.value = decmpfs_cnode_get_vnode_cached_nchildren(cp);
		} else {
			hdr->_size.value = decmpfs_cnode_get_vnode_cached_size(cp);
		}
	} else {
		/* figure out how big the xattr is on disk */
		err = vn_getxattr(vp, DECMPFS_XATTR_NAME, NULL, &attr_size, XATTR_NOSECURITY, decmpfs_ctx);
		if (err != 0) {
			goto out;
		}
		if (attr_size < sizeof(decmpfs_disk_header) || attr_size > MAX_DECMPFS_XATTR_SIZE) {
			err = EINVAL;
			goto out;
		}

		/* allocation includes space for the extra attr_size field of a compressed_header */
		MALLOC(data, char *, attr_size + sizeof(hdr->attr_size), M_TEMP, M_WAITOK);

		/* read the xattr into our buffer, skipping over the attr_size field at the beginning */
		attr_uio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
		uio_addiov(attr_uio, CAST_USER_ADDR_T(data + sizeof(hdr->attr_size)), attr_size);

		err = vn_getxattr(vp, DECMPFS_XATTR_NAME, attr_uio, &read_size, XATTR_NOSECURITY, decmpfs_ctx);
		if (err != 0) {
			goto out;
		}
		if (read_size != attr_size) {
			err = EINVAL;
			goto out;
		}
		hdr = (decmpfs_header *)data;
		hdr->attr_size = (uint32_t)attr_size;
		/* swap the fields to native endian */
		hdr->compression_magic = OSSwapLittleToHostInt32(hdr->compression_magic);
		hdr->compression_type  = OSSwapLittleToHostInt32(hdr->compression_type);
		hdr->uncompressed_size = OSSwapLittleToHostInt64(hdr->uncompressed_size);
	}

	if (hdr->compression_magic != DECMPFS_MAGIC) {
		ErrorLogWithPath("invalid compression_magic 0x%08x, should be 0x%08x\n", hdr->compression_magic, DECMPFS_MAGIC);
		err = EINVAL;
		goto out;
	}

	/*
	 * Special-case the DATALESS compressor here; that is a valid type,
	 * even through there will never be an entry in the decompressor
	 * handler table for it. If we don't do this, then the cmp_state
	 * for this cnode will end up being marked NOT_COMPRESSED, and
	 * we'll be stuck in limbo.
	 */
	if (hdr->compression_type >= CMP_MAX && !decmpfs_type_is_dataless(hdr->compression_type)) {
		if (returnInvalid) {
			/* return the header even though the type is out of range */
			err = ERANGE;
			goto out;
		}
		ErrorLogWithPath("compression_type %d out of range\n", hdr->compression_type);
		err = EINVAL;
		goto out;
	}

out:
	if (err && (err != ERANGE)) {
		DebugLogWithPath("err %d\n", err);
		if (data) {
			FREE(data, M_TEMP);
		}
		*hdrOut = NULL;
	} else {
		*hdrOut = hdr;
	}
	/*
	 * Trace the following parameters on return with event-id 0x03120004.
	 *
	 * @vp->v_id: vnode-id for which to fetch compressed header.
	 * @err: value returned from this function.
	 */
	DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FETCH_COMPRESSED_HEADER, vp->v_id, err);
	return err;
}
static int
decmpfs_fast_get_state(decmpfs_cnode *cp)
{
	/*
	 * return the cached state
	 * this should *only* be called when we know that decmpfs_file_is_compressed has already been called,
	 * because this implies that the cached state is valid
	 */
	int cmp_state = decmpfs_cnode_get_vnode_state(cp);

	switch (cmp_state) {
	case FILE_IS_NOT_COMPRESSED:
	case FILE_IS_COMPRESSED:
	case FILE_IS_CONVERTING:
		return cmp_state;
	case FILE_TYPE_UNKNOWN:
		/*
		 * we should only get here if decmpfs_file_is_compressed was not called earlier on this vnode,
		 * which should not be possible
		 */
		ErrorLog("decmpfs_fast_get_state called on unknown file\n");
		return FILE_IS_NOT_COMPRESSED;
	default:
		ErrorLog("unknown cmp_state %d\n", cmp_state);
		return FILE_IS_NOT_COMPRESSED;
	}
}

static int
decmpfs_fast_file_is_compressed(decmpfs_cnode *cp)
{
	int cmp_state = decmpfs_cnode_get_vnode_state(cp);

	switch (cmp_state) {
	case FILE_IS_NOT_COMPRESSED:
		return 0;
	case FILE_IS_COMPRESSED:
	case FILE_IS_CONVERTING:
		return 1;
	case FILE_TYPE_UNKNOWN:
		/*
		 * we should only get here if decmpfs_file_is_compressed was not called earlier on this vnode,
		 * which should not be possible
		 */
		ErrorLog("decmpfs_fast_get_state called on unknown file\n");
		return 0;
	default:
		ErrorLog("unknown cmp_state %d\n", cmp_state);
		return 0;
	}
}
errno_t
decmpfs_validate_compressed_file(vnode_t vp, decmpfs_cnode *cp)
{
	/* give a compressor a chance to indicate that a compressed file is invalid */

	decmpfs_header *hdr = NULL;
	errno_t err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0);
	if (err) {
		/* we couldn't get the header */
		if (decmpfs_fast_get_state(cp) == FILE_IS_NOT_COMPRESSED) {
			/* the file is no longer compressed, so return success */
			err = 0;
		}
		goto out;
	}

	if (!decmpfs_type_is_dataless(hdr->compression_type)) {
		lck_rw_lock_shared(decompressorsLock);
		decmpfs_validate_compressed_file_func validate = decmp_get_func(vp, hdr->compression_type, validate);
		if (validate) { /* make sure this validation function is valid */
			/* is the data okay? */
			err = validate(vp, decmpfs_ctx, hdr);
		} else if (decmp_get_func(vp, hdr->compression_type, fetch) == NULL) {
			/* the type isn't registered */
			err = EIO;
		} else {
			/* no validate registered, so nothing to do */
			err = 0;
		}
		lck_rw_unlock_shared(decompressorsLock);
	}
out:
	if (hdr != NULL) {
		FREE(hdr, M_TEMP);
	}
#if COMPRESSION_DEBUG
	if (err) {
		DebugLogWithPath("decmpfs_validate_compressed_file ret %d, vp->v_flag %d\n", err, vp->v_flag);
	}
#endif
	return err;
}
int
decmpfs_file_is_compressed(vnode_t vp, decmpfs_cnode *cp)
{
	/*
	 * determines whether vp points to a compressed file
	 *
	 * to speed up this operation, we cache the result in the cnode, and do as little as possible
	 * in the case where the cnode already has a valid cached state
	 */

	int ret = 0;
	int error = 0;
	uint32_t cmp_state;
	struct vnode_attr va_fetch;
	decmpfs_header *hdr = NULL;
	mount_t mp = NULL;
	int cnode_locked = 0;
	int saveInvalid = 0; // save the header data even though the type was out of range
	uint64_t decompression_flags = 0;
	bool is_mounted, is_local_fs;

	if (vnode_isnamedstream(vp)) {
		/*
		 * named streams can't be compressed
		 * since named streams of the same file share the same cnode,
		 * we don't want to get/set the state in the cnode, just return 0
		 */
		return 0;
	}

	/* examine the cached a state in this cnode */
	cmp_state = decmpfs_cnode_get_vnode_state(cp);
	switch (cmp_state) {
	case FILE_IS_NOT_COMPRESSED:
		return 0;
	case FILE_IS_COMPRESSED:
		return 1;
	case FILE_IS_CONVERTING:
		/* treat the file as compressed, because this gives us a way to block future reads until decompression is done */
		return 1;
	case FILE_TYPE_UNKNOWN:
		/* the first time we encountered this vnode, so we need to check it out */
		break;
	default:
		/* unknown state, assume file is not compressed */
		ErrorLogWithPath("unknown cmp_state %d\n", cmp_state);
		return 0;
	}

	is_mounted = false;
	is_local_fs = false;
	mp = vnode_mount(vp);
	if (mp) {
		is_mounted = true;
	}
	if (is_mounted) {
		is_local_fs = ((mp->mnt_flag & MNT_LOCAL));
	}
	/*
	 * Trace the following parameters on entry with event-id 0x03120014.
	 *
	 * @vp->v_id: vnode-id of the file being queried.
	 * @is_mounted: set to true if @vp belongs to a mounted fs.
	 * @is_local_fs: set to true if @vp belongs to local fs.
	 */
	DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id,
	    is_mounted, is_local_fs);

	if (!is_mounted) {
		/*
		 * this should only be true before we mount the root filesystem
		 * we short-cut this return to avoid the call to getattr below, which
		 * will fail before root is mounted
		 */
		ret = FILE_IS_NOT_COMPRESSED;
		goto done;
	}

	if (!is_local_fs) {
		/* compression only supported on local filesystems */
		ret = FILE_IS_NOT_COMPRESSED;
		goto done;
	}

	/* lock our cnode data so that another caller doesn't change the state under us */
	decmpfs_lock_compressed_data(cp, 1);
	cnode_locked = 1;

	VATTR_INIT(&va_fetch);
	VATTR_WANTED(&va_fetch, va_flags);
	error = vnode_getattr(vp, &va_fetch, decmpfs_ctx);
	if (error) {
		/* failed to get the bsd flags so the file is not compressed */
		ret = FILE_IS_NOT_COMPRESSED;
		goto done;
	}
	if (va_fetch.va_flags & UF_COMPRESSED) {
		/* UF_COMPRESSED is on, make sure the file has the DECMPFS_XATTR_NAME xattr */
		error = decmpfs_fetch_compressed_header(vp, cp, &hdr, 1);
		if ((hdr != NULL) && (error == ERANGE)) {
			saveInvalid = 1;
		}
		if (error) {
			/* failed to get the xattr so the file is not compressed */
			ret = FILE_IS_NOT_COMPRESSED;
			goto done;
		}
		/*
		 * We got the xattr, so the file is at least tagged compressed.
		 * For DATALESS, regular files and directories can be "compressed".
		 * For all other types, only files are allowed.
		 */
		if (!vnode_isreg(vp) &&
		    !(decmpfs_type_is_dataless(hdr->compression_type) && vnode_isdir(vp))) {
			ret = FILE_IS_NOT_COMPRESSED;
			goto done;
		}
		ret = FILE_IS_COMPRESSED;
		goto done;
	}
	/* UF_COMPRESSED isn't on, so the file isn't compressed */
	ret = FILE_IS_NOT_COMPRESSED;

done:
	if (((ret == FILE_IS_COMPRESSED) || saveInvalid) && hdr) {
		/*
		 * cache the uncompressed size away in the cnode
		 */

		if (!cnode_locked) {
			/*
			 * we should never get here since the only place ret is set to FILE_IS_COMPRESSED
			 * is after the call to decmpfs_lock_compressed_data above
			 */
			decmpfs_lock_compressed_data(cp, 1);
			cnode_locked = 1;
		}

		if (vnode_isdir(vp)) {
			decmpfs_cnode_set_vnode_cached_size(cp, 64);
			decmpfs_cnode_set_vnode_cached_nchildren(cp, decmpfs_get_directory_entries(hdr));
			if (hdr->compression_type == DATALESS_PKG_CMPFS_TYPE) {
				decmpfs_cnode_set_vnode_cached_total_size(cp, DECMPFS_PKG_SIZE(hdr->_size));
			}
		} else {
			decmpfs_cnode_set_vnode_cached_size(cp, hdr->uncompressed_size);
		}
		decmpfs_cnode_set_vnode_state(cp, ret, 1);
		decmpfs_cnode_set_vnode_cmp_type(cp, hdr->compression_type, 1);
		/* remember if the xattr's size was equal to the minimal xattr */
		if (hdr->attr_size == sizeof(decmpfs_disk_header)) {
			decmpfs_cnode_set_vnode_minimal_xattr(cp, 1, 1);
		}
		if (ret == FILE_IS_COMPRESSED) {
			/* update the ubc's size for this file */
			ubc_setsize(vp, hdr->uncompressed_size);

			/* update the decompression flags in the decmpfs cnode */
			lck_rw_lock_shared(decompressorsLock);
			decmpfs_get_decompression_flags_func get_flags = decmp_get_func(vp, hdr->compression_type, get_flags);
			if (get_flags) {
				decompression_flags = get_flags(vp, decmpfs_ctx, hdr);
			}
			lck_rw_unlock_shared(decompressorsLock);
			decmpfs_cnode_set_decompression_flags(cp, decompression_flags);
		}
	} else {
		/* we might have already taken the lock above; if so, skip taking it again by passing cnode_locked as the skiplock parameter */
		decmpfs_cnode_set_vnode_state(cp, ret, cnode_locked);
	}

	if (cnode_locked) {
		decmpfs_unlock_compressed_data(cp, 1);
	}

	if (hdr != NULL) {
		FREE(hdr, M_TEMP);
	}
	/*
	 * Trace the following parameters on return with event-id 0x03120014.
	 *
	 * @vp->v_id: vnode-id of the file being queried.
	 * @return: set to 1 is file is compressed.
	 */
	switch (ret) {
	case FILE_IS_NOT_COMPRESSED:
		DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 0);
		return 0;
	case FILE_IS_COMPRESSED:
	case FILE_IS_CONVERTING:
		DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 1);
		return 1;
	default:
		/* unknown state, assume file is not compressed */
		DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 0);
		ErrorLogWithPath("unknown ret %d\n", ret);
		return 0;
	}
}
int
decmpfs_update_attributes(vnode_t vp, struct vnode_attr *vap)
{
	int error = 0;

	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		/* the BSD flags are being updated */
		if (vap->va_flags & UF_COMPRESSED) {
			/* the compressed bit is being set, did it change? */
			struct vnode_attr va_fetch;
			uint32_t old_flags = 0;
			VATTR_INIT(&va_fetch);
			VATTR_WANTED(&va_fetch, va_flags);
			error = vnode_getattr(vp, &va_fetch, decmpfs_ctx);
			if (error) {
				return error;
			}

			old_flags = va_fetch.va_flags;

			if (!(old_flags & UF_COMPRESSED)) {
				/*
				 * Compression bit was turned on, make sure the file has the DECMPFS_XATTR_NAME attribute.
				 * This precludes anyone from using the UF_COMPRESSED bit for anything else, and it enforces
				 * an order of operation -- you must first do the setxattr and then the chflags.
				 */

				if (VATTR_IS_ACTIVE(vap, va_data_size)) {
					/*
					 * don't allow the caller to set the BSD flag and the size in the same call
					 * since this doesn't really make sense
					 */
					vap->va_flags &= ~UF_COMPRESSED;
					return 0;
				}

				decmpfs_header *hdr = NULL;
				error = decmpfs_fetch_compressed_header(vp, NULL, &hdr, 1);
				if (error == 0) {
					/*
					 * Allow the flag to be set since the decmpfs attribute
					 * is present.
					 *
					 * If we're creating a dataless file we do not want to
					 * truncate it to zero which allows the file resolver to
					 * have more control over when truncation should happen.
					 * All other types of compressed files are truncated to
					 * zero.
					 */
					if (!decmpfs_type_is_dataless(hdr->compression_type)) {
						VATTR_SET_ACTIVE(vap, va_data_size);
						vap->va_data_size = 0;
					}
				} else if (error == ERANGE) {
					/* the file had a decmpfs attribute but the type was out of range, so don't muck with the file's data size */
				} else {
					/* no DECMPFS_XATTR_NAME attribute, so deny the update */
					vap->va_flags &= ~UF_COMPRESSED;
				}
				if (hdr) {
					FREE(hdr, M_TEMP);
				}
			}
		}
	}

	return 0;
}
static int
wait_for_decompress(decmpfs_cnode *cp)
{
	int state;
	lck_mtx_lock(decompress_channel_mtx);
	do {
		state = decmpfs_fast_get_state(cp);
		if (state != FILE_IS_CONVERTING) {
			/* file is not decompressing */
			lck_mtx_unlock(decompress_channel_mtx);
			return state;
		}
		msleep((caddr_t)&decompress_channel, decompress_channel_mtx, PINOD, "wait_for_decompress", NULL);
	} while (1);
}
#pragma mark --- decmpfs hide query routines ---

int
decmpfs_hides_rsrc(vfs_context_t ctx, decmpfs_cnode *cp)
{
	/*
	 * callers may (and do) pass NULL for ctx, so we should only use it
	 * for this equality comparison
	 *
	 * This routine should only be called after a file has already been through decmpfs_file_is_compressed
	 */

	if (ctx == decmpfs_ctx) {
		return 0;
	}

	if (!decmpfs_fast_file_is_compressed(cp)) {
		return 0;
	}

	/* all compressed files hide their resource fork */
	return 1;
}

int
decmpfs_hides_xattr(vfs_context_t ctx, decmpfs_cnode *cp, const char *xattr)
{
	/*
	 * callers may (and do) pass NULL for ctx, so we should only use it
	 * for this equality comparison
	 *
	 * This routine should only be called after a file has already been through decmpfs_file_is_compressed
	 */

	if (ctx == decmpfs_ctx) {
		return 0;
	}

	if (strncmp(xattr, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME) - 1) == 0) {
		return decmpfs_hides_rsrc(ctx, cp);
	}
	if (!decmpfs_fast_file_is_compressed(cp)) {
		/* file is not compressed, so don't hide this xattr */
		return 0;
	}
	if (strncmp(xattr, DECMPFS_XATTR_NAME, sizeof(DECMPFS_XATTR_NAME) - 1) == 0) {
		/* it's our xattr, so hide it */
		return 1;
	}
	/* don't hide this xattr */
	return 0;
}
#pragma mark --- registration/validation routines ---

static inline int
registration_valid(const decmpfs_registration *registration)
{
	return registration && ((registration->decmpfs_registration == DECMPFS_REGISTRATION_VERSION_V1) || (registration->decmpfs_registration == DECMPFS_REGISTRATION_VERSION_V3));
}
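/*
 * Illustrative sketch (not part of this file): a decompressor kext fills in a
 * decmpfs_registration and hands it to register_decmpfs_decompressor() below.
 * The type constant and callback names here are hypothetical placeholders; see
 * Type1Reg at the end of this file for the in-kernel example.
 *
 *     static const decmpfs_registration my_registration = {
 *         .decmpfs_registration = DECMPFS_REGISTRATION_VERSION,
 *         .validate             = my_validate,
 *         .adjust_fetch         = NULL,
 *         .fetch                = my_fetch,
 *         .free_data            = NULL,
 *         .get_flags            = NULL,
 *     };
 *     register_decmpfs_decompressor(MY_CMP_TYPE, &my_registration);
 *
 * Registration publishes the com.apple.AppleFSCompression.Type<N> IOKit
 * resource so waiters in _decmp_get_func() can proceed.
 */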
errno_t
register_decmpfs_decompressor(uint32_t compression_type, const decmpfs_registration *registration)
{
	/* called by kexts to register decompressors */

	errno_t ret = 0;
	int locked = 0;
	char resourceName[80];

	if ((compression_type >= CMP_MAX) || !registration_valid(registration)) {
		ret = EINVAL;
		goto out;
	}

	lck_rw_lock_exclusive(decompressorsLock); locked = 1;

	/* make sure the registration for this type is zero */
	if (decompressors[compression_type] != NULL) {
		ret = EEXIST;
		goto out;
	}
	decompressors[compression_type] = registration;
	snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", compression_type);
	IOServicePublishResource(resourceName, TRUE);

out:
	if (locked) {
		lck_rw_unlock_exclusive(decompressorsLock);
	}
	return ret;
}
errno_t
unregister_decmpfs_decompressor(uint32_t compression_type, decmpfs_registration *registration)
{
	/* called by kexts to unregister decompressors */

	errno_t ret = 0;
	int locked = 0;
	char resourceName[80];

	if ((compression_type >= CMP_MAX) || !registration_valid(registration)) {
		ret = EINVAL;
		goto out;
	}

	lck_rw_lock_exclusive(decompressorsLock); locked = 1;
	if (decompressors[compression_type] != registration) {
		ret = EEXIST;
		goto out;
	}
	decompressors[compression_type] = NULL;
	snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", compression_type);
	IOServicePublishResource(resourceName, FALSE);

out:
	if (locked) {
		lck_rw_unlock_exclusive(decompressorsLock);
	}
	return ret;
}
static int
compression_type_valid(vnode_t vp, decmpfs_header *hdr)
{
	/* fast pre-check to determine if the given compressor has checked in */
	int ret = 0;

	/* every compressor must have at least a fetch function */
	lck_rw_lock_shared(decompressorsLock);
	if (decmp_get_func(vp, hdr->compression_type, fetch) != NULL) {
		ret = 1;
	}
	lck_rw_unlock_shared(decompressorsLock);

	return ret;
}
1215 decmpfs_fetch_uncompressed_data(vnode_t vp
, decmpfs_cnode
*cp
, decmpfs_header
*hdr
, off_t offset
, user_ssize_t size
, int nvec
, decmpfs_vector
*vec
, uint64_t *bytes_read
)
1217 /* get the uncompressed bytes for the specified region of vp by calling out to the registered compressor */
1223 if (offset
>= (off_t
)hdr
->uncompressed_size
) {
1224 /* reading past end of file; nothing to do */
1229 /* tried to read from before start of file */
1233 if (hdr
->uncompressed_size
- offset
< size
) {
1234 /* adjust size so we don't read past the end of the file */
1235 size
= (user_ssize_t
)(hdr
->uncompressed_size
- offset
);
1238 /* nothing to read */
1244 * Trace the following parameters on entry with event-id 0x03120008.
1246 * @vp->v_id: vnode-id of the file being decompressed.
1247 * @hdr->compression_type: compression type.
1248 * @offset: offset from where to fetch uncompressed data.
1249 * @size: amount of uncompressed data to fetch.
1251 * Please NOTE: @offset and @size can overflow in theory but
1254 DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FETCH_UNCOMPRESSED_DATA
, vp
->v_id
,
1255 hdr
->compression_type
, (int)offset
, (int)size
);
1256 lck_rw_lock_shared(decompressorsLock
);
1257 decmpfs_fetch_uncompressed_data_func fetch
= decmp_get_func(vp
, hdr
->compression_type
, fetch
);
1259 err
= fetch(vp
, decmpfs_ctx
, hdr
, offset
, size
, nvec
, vec
, bytes_read
);
1260 lck_rw_unlock_shared(decompressorsLock
);
1262 uint64_t decompression_flags
= decmpfs_cnode_get_decompression_flags(cp
);
1263 if (decompression_flags
& DECMPFS_FLAGS_FORCE_FLUSH_ON_DECOMPRESS
) {
1264 #if !defined(__i386__) && !defined(__x86_64__)
1266 for (i
= 0; i
< nvec
; i
++) {
1267 assert(vec
[i
].size
>= 0 && vec
[i
].size
<= UINT_MAX
);
1268 flush_dcache64((addr64_t
)(uintptr_t)vec
[i
].buf
, (unsigned int)vec
[i
].size
, FALSE
);
1275 lck_rw_unlock_shared(decompressorsLock
);
1278 * Trace the following parameters on return with event-id 0x03120008.
1280 * @vp->v_id: vnode-id of the file being decompressed.
1281 * @bytes_read: amount of uncompressed bytes fetched in bytes.
1282 * @err: value returned from this function.
1284 * Please NOTE: @bytes_read can overflow in theory but here it is safe.
1286 DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FETCH_UNCOMPRESSED_DATA
, vp
->v_id
,
1287 (int)*bytes_read
, err
);
static kern_return_t
commit_upl(upl_t upl, upl_offset_t pl_offset, size_t uplSize, int flags, int abort)
{
	kern_return_t kr = 0;

#if CONFIG_IOSCHED
	upl_unmark_decmp(upl);
#endif /* CONFIG_IOSCHED */

	/* commit the upl pages */
	if (abort) {
		VerboseLog("aborting upl, flags 0x%08x\n", flags);
		kr = ubc_upl_abort_range(upl, pl_offset, (upl_size_t)uplSize, flags);
		if (kr != KERN_SUCCESS) {
			ErrorLog("ubc_upl_abort_range error %d\n", (int)kr);
		}
	} else {
		VerboseLog("committing upl, flags 0x%08x\n", flags | UPL_COMMIT_CLEAR_DIRTY);
		kr = ubc_upl_commit_range(upl, pl_offset, (upl_size_t)uplSize, flags | UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_WRITTEN_BY_KERNEL);
		if (kr != KERN_SUCCESS) {
			ErrorLog("ubc_upl_commit_range error %d\n", (int)kr);
		}
	}
	return kr;
}
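/*
 * decmpfs_pagein_compressed() below services a VM page-in against a compressed
 * file: it maps the caller's UPL, asks the registered compressor to fill the
 * invalid pages with uncompressed data, zero-fills whatever remains past EOF,
 * optionally verifies the result via VNOP_VERIFY, and then commits (or aborts)
 * the UPL range.
 */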
int
decmpfs_pagein_compressed(struct vnop_pagein_args *ap, int *is_compressed, decmpfs_cnode *cp)
{
	/* handles a page-in request from vfs for a compressed file */

	int err                   = 0;
	vnode_t vp                = ap->a_vp;
	upl_t pl                  = ap->a_pl;
	upl_offset_t pl_offset    = ap->a_pl_offset;
	off_t f_offset            = ap->a_f_offset;
	size_t size               = ap->a_size;
	int flags                 = ap->a_flags;
	off_t uplPos              = 0;
	user_ssize_t uplSize      = 0;
	size_t verify_block_size  = 0;
	void *data                = NULL;
	decmpfs_header *hdr       = NULL;
	uint64_t cachedSize       = 0;
	int cmpdata_locked        = 0;
	bool file_tail_page_valid = false;
	int num_valid_pages       = 0;
	int num_invalid_pages     = 0;

	if (!decmpfs_trylock_compressed_data(cp, 0)) {
		return EAGAIN;
	}
	cmpdata_locked = 1;

	if (flags & ~(UPL_IOSYNC | UPL_NOCOMMIT | UPL_NORDAHEAD)) {
		DebugLogWithPath("pagein: unknown flags 0x%08x\n", (flags & ~(UPL_IOSYNC | UPL_NOCOMMIT | UPL_NORDAHEAD)));
	}

	err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0);
	if (err != 0) {
		goto out;
	}

	cachedSize = hdr->uncompressed_size;

	if (!compression_type_valid(vp, hdr)) {
		/* compressor not registered */
		err = ENOTSUP;
		goto out;
	}

	/*
	 * If the verify block size is larger than the page size, the UPL needs
	 * to be aligned to it, Since the UPL has been created by the filesystem,
	 * we will only check if the passed in UPL length conforms to the
	 * alignment requirements.
	 */
	err = VNOP_VERIFY(vp, f_offset, NULL, 0, &verify_block_size,
	    VNODE_VERIFY_DEFAULT, NULL);
	if (err) {
		goto out;
	} else if (verify_block_size) {
		if (verify_block_size & (verify_block_size - 1)) {
			ErrorLogWithPath("verify block size is not power of 2, no verification will be done\n");
			verify_block_size = 0;
		} else if (size % verify_block_size) {
			ErrorLogWithPath("upl size is not a multiple of verify block size\n");
			err = EINVAL;
			goto out;
		}
	}

#if CONFIG_IOSCHED
	/* Mark the UPL as the requesting UPL for decompression */
	upl_mark_decmp(pl);
#endif /* CONFIG_IOSCHED */

	/* map the upl so we can fetch into it */
	kern_return_t kr = ubc_upl_map(pl, (vm_offset_t *)&data);
	if ((kr != KERN_SUCCESS) || (data == NULL)) {
		err = ENOSPC;
		data = NULL;
#if CONFIG_IOSCHED
		upl_unmark_decmp(pl);
#endif /* CONFIG_IOSCHED */
		goto out;
	}

	uplPos = f_offset;
	uplSize = size;

	/* clip the size to the size of the file */
	if ((uint64_t)uplPos + uplSize > cachedSize) {
		/* truncate the read to the size of the file */
		uplSize = (user_ssize_t)(cachedSize - uplPos);
	}

	/* do the fetch */
	decmpfs_vector vec;

decompress:
	/* the mapped data pointer points to the first page of the page list, so we want to start filling in at an offset of pl_offset */
	vec = (decmpfs_vector) {
		.buf = (char *)data + pl_offset,
		.size = size,
	};

	uint64_t did_read = 0;
	if (decmpfs_fast_get_state(cp) == FILE_IS_CONVERTING) {
		ErrorLogWithPath("unexpected pagein during decompress\n");
		/*
		 * if the file is converting, this must be a recursive call to pagein from underneath a call to decmpfs_decompress_file;
		 * pretend that it succeeded but don't do anything since we're just going to write over the pages anyway
		 */
		err = 0;
	} else {
		if (!verify_block_size || (verify_block_size <= PAGE_SIZE)) {
			err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, uplPos, uplSize, 1, &vec, &did_read);
		} else {
			off_t l_uplPos = uplPos;
			off_t l_pl_offset = pl_offset;
			user_ssize_t l_uplSize = uplSize;
			upl_page_info_t *pl_info = ubc_upl_pageinfo(pl);

			err = 0;
			/*
			 * When the system page size is less than the "verify block size",
			 * the UPL passed may not consist solely of absent pages.
			 * We have to detect the "absent" pages and only decompress
			 * into those absent/invalid page ranges.
			 *
			 * Things that will change in each iteration of the loop :
			 *
			 * l_pl_offset = where we are inside the UPL [0, caller_upl_created_size)
			 * l_uplPos = the file offset the l_pl_offset corresponds to.
			 * l_uplSize = the size of the upl still unprocessed;
			 *
			 * In this picture, we have to do the transfer on 2 ranges
			 * (One 2 page range and one 3 page range) and the loop
			 * below will skip the first two pages and then identify
			 * the next two as invalid and fill those in and
			 * then skip the next one and then do the last pages.
			 *
			 *                uplPos(file_offset)
			 *                  |
			 *                0 V<--------------> file_size
			 *                |--------------------------------------------------->
			 *                | | |V|V|I|I|V|I|I|I|
			 *                   ^
			 *                   <------------------->
			 *                          upl
			 *
			 * uplSize will be clipped in case the UPL range exceeds
			 * the file size.
			 */
			while (l_uplSize) {
				uint64_t l_did_read = 0;
				int pl_offset_pg = (int)(l_pl_offset / PAGE_SIZE);
				int pages_left_in_upl;
				int start_pg;
				int last_pg;

				/*
				 * l_uplSize may start off less than the size of the upl,
				 * we have to round it up to PAGE_SIZE to calculate
				 * how many more pages are left.
				 */
				pages_left_in_upl = (int)(round_page((vm_offset_t)l_uplSize) / PAGE_SIZE);

				/*
				 * scan from the beginning of the upl looking for the first
				 * non-valid page.... this will become the first page in
				 * the request we're going to make to
				 * 'decmpfs_fetch_uncompressed_data'... if all
				 * of the pages are valid, we won't call through
				 * to 'decmpfs_fetch_uncompressed_data'
				 */
				for (start_pg = 0; start_pg < pages_left_in_upl; start_pg++) {
					if (!upl_valid_page(pl_info, pl_offset_pg + start_pg)) {
						break;
					}
				}

				num_valid_pages += start_pg;

				/*
				 * scan from the starting invalid page looking for
				 * a valid page before the end of the upl is
				 * reached, if we find one, then it will be the
				 * last page of the request to 'decmpfs_fetch_uncompressed_data'
				 */
				for (last_pg = start_pg; last_pg < pages_left_in_upl; last_pg++) {
					if (upl_valid_page(pl_info, pl_offset_pg + last_pg)) {
						break;
					}
				}

				if (start_pg < last_pg) {
					off_t inval_offset = start_pg * PAGE_SIZE;
					int inval_pages = last_pg - start_pg;
					int inval_size = inval_pages * PAGE_SIZE;
					decmpfs_vector l_vec;

					num_invalid_pages += inval_pages;

					did_read += inval_offset;
					l_pl_offset += inval_offset;
					l_uplPos += inval_offset;
					l_uplSize -= inval_offset;

					l_vec = (decmpfs_vector) {
						.buf = (char *)data + l_pl_offset,
						.size = inval_size,
					};

					err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, l_uplPos,
					    MIN(l_uplSize, inval_size), 1, &l_vec, &l_did_read);

					if (!err && (l_did_read != inval_size) && (l_uplSize > inval_size)) {
						ErrorLogWithPath("Unexpected size fetch of decompressed data, l_uplSize = %d, l_did_read = %d, inval_size = %d\n",
						    (int)l_uplSize, (int)l_did_read, (int)inval_size);
						err = EINVAL;
					}
				} else {
					/* no invalid pages left */
					l_did_read = l_uplSize;
					if (uplSize < size) {
						file_tail_page_valid = true;
					}
				}

				if (err) {
					break;
				}

				did_read += l_did_read;
				l_pl_offset += l_did_read;
				l_uplPos += l_did_read;
				l_uplSize -= l_did_read;
			}
		}
	}
	if (err) {
		DebugLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err);
		int cmp_state = decmpfs_fast_get_state(cp);
		if (cmp_state == FILE_IS_CONVERTING) {
			DebugLogWithPath("cmp_state == FILE_IS_CONVERTING\n");
			cmp_state = wait_for_decompress(cp);
			if (cmp_state == FILE_IS_COMPRESSED) {
				DebugLogWithPath("cmp_state == FILE_IS_COMPRESSED\n");
				/* a decompress was attempted but it failed, let's try calling fetch again */
				goto decompress;
			}
		}
		if (cmp_state == FILE_IS_NOT_COMPRESSED) {
			DebugLogWithPath("cmp_state == FILE_IS_NOT_COMPRESSED\n");
			/* the file was decompressed after we started reading it */
			*is_compressed = 0; /* instruct caller to fall back to its normal path */
		}
	}

	/* zero out whatever we didn't read, and zero out the end of the last page(s) */
	uint64_t total_size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
	if (did_read < total_size && !(verify_block_size && err)) {
		uint64_t rounded_up_did_read = file_tail_page_valid ? (uint64_t)(round_page((vm_offset_t)did_read)) : did_read;
		memset((char *)vec.buf + rounded_up_did_read, 0, (size_t)(total_size - rounded_up_did_read));
	}

	if (!err && verify_block_size) {
		size_t cur_verify_block_size = verify_block_size;

		if ((err = VNOP_VERIFY(vp, uplPos, vec.buf, size, &cur_verify_block_size, 0, NULL))) {
			ErrorLogWithPath("Verification failed with error %d, uplPos = %lld, uplSize = %d, did_read = %d, total_size = %d, valid_pages = %d, invalid_pages = %d, tail_page_valid = %d\n",
			    err, (long long)uplPos, (int)uplSize, (int)did_read, (int)total_size, num_valid_pages, num_invalid_pages, file_tail_page_valid);
		}
		/* XXX : If the verify block size changes, redo the read */
	}

#if CONFIG_IOSCHED
	upl_unmark_decmp(pl);
#endif /* CONFIG_IOSCHED */

	kr = ubc_upl_unmap(pl); data = NULL; /* make sure to set data to NULL so we don't try to unmap again below */
	if (kr != KERN_SUCCESS) {
		ErrorLogWithPath("ubc_upl_unmap error %d\n", (int)kr);
	} else {
		if (!err) {
			/* commit our pages */
			kr = commit_upl(pl, pl_offset, (size_t)total_size, UPL_COMMIT_FREE_ON_EMPTY, 0);
		}
	}

out:
	if (hdr != NULL) {
		FREE(hdr, M_TEMP);
	}
	if (cmpdata_locked) {
		decmpfs_unlock_compressed_data(cp, 0);
	}
	if (err) {
#if COMPRESSION_DEBUG
		if (err != ENXIO && err != ENOSPC) {
			char *path;
			MALLOC(path, char *, PATH_MAX, M_TEMP, M_WAITOK);
			panic("%s: decmpfs_pagein_compressed: err %d", vnpath(vp, path, PATH_MAX), err);
			FREE(path, M_TEMP);
		}
#endif /* COMPRESSION_DEBUG */
		ErrorLogWithPath("err %d\n", err);
	}
	return err;
}
int
decmpfs_read_compressed(struct vnop_read_args *ap, int *is_compressed, decmpfs_cnode *cp)
{
	/* handles a read request from vfs for a compressed file */

	uio_t uio                 = ap->a_uio;
	vnode_t vp                = ap->a_vp;
	int err                   = 0;
	int countInt              = 0;
	off_t uplPos              = 0;
	user_ssize_t uplSize      = 0;
	user_ssize_t uplRemaining = 0;
	off_t curUplPos           = 0;
	user_ssize_t curUplSize   = 0;
	kern_return_t kr          = KERN_SUCCESS;
	int abort_read            = 0;
	void *data                = NULL;
	uint64_t did_read         = 0;
	upl_t upl                 = NULL;
	upl_page_info_t *pli      = NULL;
	decmpfs_header *hdr       = NULL;
	uint64_t cachedSize       = 0;
	off_t uioPos              = 0;
	user_ssize_t uioRemaining = 0;
	size_t verify_block_size  = 0;
	size_t alignment_size     = PAGE_SIZE;
	int cmpdata_locked        = 0;

	decmpfs_lock_compressed_data(cp, 0); cmpdata_locked = 1;

	uplPos = uio_offset(uio);
	uplSize = uio_resid(uio);
	VerboseLogWithPath("uplPos %lld uplSize %lld\n", uplPos, uplSize);

	cachedSize = decmpfs_cnode_get_vnode_cached_size(cp);

	if ((uint64_t)uplPos + uplSize > cachedSize) {
		/* truncate the read to the size of the file */
		uplSize = (user_ssize_t)(cachedSize - uplPos);
	}

	/* give the cluster layer a chance to fill in whatever it already has */
	countInt = (uplSize > INT_MAX) ? INT_MAX : (int)uplSize;
	err = cluster_copy_ubc_data(vp, uio, &countInt, 0);
	if (err != 0) {
		goto out;
	}

	/* figure out what's left */
	uioPos = uio_offset(uio);
	uioRemaining = uio_resid(uio);
	if ((uint64_t)uioPos + uioRemaining > cachedSize) {
		/* truncate the read to the size of the file */
		uioRemaining = (user_ssize_t)(cachedSize - uioPos);
	}

	if (uioRemaining <= 0) {
		/* nothing left */
		goto out;
	}

	err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0);
	if (err != 0) {
		goto out;
	}
	if (!compression_type_valid(vp, hdr)) {
		err = ENOTSUP;
		goto out;
	}

	uplPos = uioPos;
	uplSize = uioRemaining;
#if COMPRESSION_DEBUG
	DebugLogWithPath("uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize);
#endif

	lck_rw_lock_shared(decompressorsLock);
	decmpfs_adjust_fetch_region_func adjust_fetch = decmp_get_func(vp, hdr->compression_type, adjust_fetch);
	if (adjust_fetch) {
		/* give the compressor a chance to adjust the portion of the file that we read */
		adjust_fetch(vp, decmpfs_ctx, hdr, &uplPos, &uplSize);
		VerboseLogWithPath("adjusted uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize);
	}
	lck_rw_unlock_shared(decompressorsLock);

	/* clip the adjusted size to the size of the file */
	if ((uint64_t)uplPos + uplSize > cachedSize) {
		/* truncate the read to the size of the file */
		uplSize = (user_ssize_t)(cachedSize - uplPos);
	}

	/*
	 * since we're going to create a upl for the given region of the file,
	 * make sure we're on page boundaries
	 */

	/* If the verify block size is larger than the page size, the UPL needs to aligned to it */
	err = VNOP_VERIFY(vp, uplPos, NULL, 0, &verify_block_size, VNODE_VERIFY_DEFAULT, NULL);
	if (err) {
		goto out;
	} else if (verify_block_size) {
		if (verify_block_size & (verify_block_size - 1)) {
			ErrorLogWithPath("verify block size is not power of 2, no verification will be done\n");
			verify_block_size = 0;
		} else if (verify_block_size > PAGE_SIZE) {
			alignment_size = verify_block_size;
		}
	}

	if (uplPos & (alignment_size - 1)) {
		/* round position down to page boundary */
		uplSize += (uplPos & (alignment_size - 1));
		uplPos &= ~(alignment_size - 1);
	}

	/* round size up to alignement_size multiple */
	uplSize = (uplSize + (alignment_size - 1)) & ~(alignment_size - 1);

	VerboseLogWithPath("new uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize);

	uplRemaining = uplSize;
	curUplPos = uplPos;
	curUplSize = 0;

	while (uplRemaining > 0) {
		/* start after the last upl */
		curUplPos += curUplSize;

		/* clip to max upl size */
		curUplSize = uplRemaining;
		if (curUplSize > MAX_UPL_SIZE_BYTES) {
			curUplSize = MAX_UPL_SIZE_BYTES;
		}

		/* create the upl */
		kr = ubc_create_upl_kernel(vp, curUplPos, (int)curUplSize, &upl, &pli, UPL_SET_LITE, VM_KERN_MEMORY_FILE);
		if (kr != KERN_SUCCESS) {
			ErrorLogWithPath("ubc_create_upl error %d\n", (int)kr);
			err = EINVAL;
			goto out;
		}
		VerboseLogWithPath("curUplPos %lld curUplSize %lld\n", (uint64_t)curUplPos, (uint64_t)curUplSize);

#if CONFIG_IOSCHED
		/* Mark the UPL as the requesting UPL for decompression */
		upl_mark_decmp(upl);
#endif /* CONFIG_IOSCHED */

		/* map the upl */
		kr = ubc_upl_map(upl, (vm_offset_t *)&data);
		if (kr != KERN_SUCCESS) {
			commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1);
#if COMPRESSION_DEBUG
			char *path;
			MALLOC(path, char *, PATH_MAX, M_TEMP, M_WAITOK);
			panic("%s: decmpfs_read_compressed: ubc_upl_map error %d", vnpath(vp, path, PATH_MAX), (int)kr);
#else /* COMPRESSION_DEBUG */
			ErrorLogWithPath("ubc_upl_map kr=0x%x\n", (int)kr);
#endif /* COMPRESSION_DEBUG */
			err = EINVAL;
			goto out;
		}

		/* make sure the map succeeded */
		if (!data) {
			commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1);

			ErrorLogWithPath("ubc_upl_map mapped null\n");
			err = EINVAL;
			goto out;
		}

		/* fetch uncompressed data into the mapped upl */
		decmpfs_vector vec;
decompress:
		vec = (decmpfs_vector){ .buf = data, .size = curUplSize };
		err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, curUplPos, curUplSize, 1, &vec, &did_read);
		if (err) {
			ErrorLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err);

			/* maybe the file is converting to decompressed */
			int cmp_state = decmpfs_fast_get_state(cp);
			if (cmp_state == FILE_IS_CONVERTING) {
				ErrorLogWithPath("cmp_state == FILE_IS_CONVERTING\n");
				cmp_state = wait_for_decompress(cp);
				if (cmp_state == FILE_IS_COMPRESSED) {
					ErrorLogWithPath("cmp_state == FILE_IS_COMPRESSED\n");
					/* a decompress was attempted but it failed, let's try fetching again */
					goto decompress;
				}
			}
			if (cmp_state == FILE_IS_NOT_COMPRESSED) {
				ErrorLogWithPath("cmp_state == FILE_IS_NOT_COMPRESSED\n");
				/* the file was decompressed after we started reading it */
				abort_read = 1;     /* we're not going to commit our data */
				*is_compressed = 0; /* instruct caller to fall back to its normal path */
			}
		}

		/* zero out the remainder of the last page */
		memset((char *)data + did_read, 0, (size_t)(curUplSize - did_read));
		if (!err && verify_block_size) {
			size_t cur_verify_block_size = verify_block_size;

			if ((err = VNOP_VERIFY(vp, curUplPos, data, curUplSize, &cur_verify_block_size, 0, NULL))) {
				ErrorLogWithPath("Verification failed with error %d\n", err);
				abort_read = 1;
			}
			/* XXX : If the verify block size changes, redo the read */
		}

		kr = ubc_upl_unmap(upl);
		if (kr == KERN_SUCCESS) {
			if (abort_read) {
				kr = commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1);
			} else {
				VerboseLogWithPath("uioPos %lld uioRemaining %lld\n", (uint64_t)uioPos, (uint64_t)uioRemaining);
				if (uioRemaining) {
					off_t uplOff = uioPos - curUplPos;
					if (uplOff < 0) {
						ErrorLogWithPath("uplOff %lld should never be negative\n", (int64_t)uplOff);
						err = EINVAL;
					} else if (uplOff > INT_MAX) {
						ErrorLogWithPath("uplOff %lld too large\n", (int64_t)uplOff);
						err = EINVAL;
					} else {
						off_t count = curUplPos + curUplSize - uioPos;
						if (count < 0) {
							/* this upl is entirely before the uio */
						} else {
							if (count > uioRemaining) {
								count = uioRemaining;
							}
							int icount = (count > INT_MAX) ? INT_MAX : (int)count;
							int io_resid = icount;
							err = cluster_copy_upl_data(uio, upl, (int)uplOff, &io_resid);
							int copied = icount - io_resid;
							VerboseLogWithPath("uplOff %lld count %lld copied %lld\n", (uint64_t)uplOff, (uint64_t)count, (uint64_t)copied);
							if (err) {
								ErrorLogWithPath("cluster_copy_upl_data err %d\n", err);
							}
							uioPos += copied;
							uioRemaining -= copied;
						}
					}
				}
				kr = commit_upl(upl, 0, curUplSize, UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_INACTIVATE, 0);
			}
		} else {
			ErrorLogWithPath("ubc_upl_unmap error %d\n", (int)kr);
		}

		uplRemaining -= curUplSize;
	}

out:
	if (hdr != NULL) {
		FREE(hdr, M_TEMP);
	}
	if (cmpdata_locked) {
		decmpfs_unlock_compressed_data(cp, 0);
	}
	if (err) {/* something went wrong */
		ErrorLogWithPath("err %d\n", err);
		return err;
	}

#if COMPRESSION_DEBUG
	uplSize = uio_resid(uio);
	if (uplSize) {
		VerboseLogWithPath("still %lld bytes to copy\n", uplSize);
	}
#endif
	return 0;
}
static int
decmpfs_free_compressed_data(vnode_t vp, decmpfs_cnode *cp)
{
	/*
	 * call out to the decompressor to free remove any data associated with this compressed file
	 * then delete the file's compression xattr
	 */
	decmpfs_header *hdr = NULL;

	/*
	 * Trace the following parameters on entry with event-id 0x03120010.
	 *
	 * @vp->v_id: vnode-id of the file for which to free compressed data.
	 */
	DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FREE_COMPRESSED_DATA, vp->v_id);

	int err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0);
	if (err) {
		ErrorLogWithPath("decmpfs_fetch_compressed_header err %d\n", err);
	} else {
		lck_rw_lock_shared(decompressorsLock);
		decmpfs_free_compressed_data_func free_data = decmp_get_func(vp, hdr->compression_type, free_data);
		if (free_data) {
			err = free_data(vp, decmpfs_ctx, hdr);
		} else {
			/* nothing to do, so no error */
			err = 0;
		}
		lck_rw_unlock_shared(decompressorsLock);

		if (err != 0) {
			ErrorLogWithPath("decompressor err %d\n", err);
		}
	}
	/*
	 * Trace the following parameters on return with event-id 0x03120010.
	 *
	 * @vp->v_id: vnode-id of the file for which to free compressed data.
	 * @err: value returned from this function.
	 */
	DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FREE_COMPRESSED_DATA, vp->v_id, err);

	/* delete the xattr */
	err = vn_removexattr(vp, DECMPFS_XATTR_NAME, 0, decmpfs_ctx);

	if (hdr != NULL) {
		FREE(hdr, M_TEMP);
	}
	return err;
}
#pragma mark --- file conversion routines ---

static void
unset_compressed_flag(vnode_t vp)
{
	int err = 0;
	struct vnode_attr va;
	int new_bsdflags = 0;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_flags);
	err = vnode_getattr(vp, &va, decmpfs_ctx);

	if (err != 0) {
		ErrorLogWithPath("vnode_getattr err %d\n", err);
	} else {
		new_bsdflags = va.va_flags & ~UF_COMPRESSED;

		VATTR_INIT(&va);
		VATTR_SET(&va, va_flags, new_bsdflags);
		err = vnode_setattr(vp, &va, decmpfs_ctx);
		if (err != 0) {
			ErrorLogWithPath("vnode_setattr err %d\n", err);
		}
	}
}
int
decmpfs_decompress_file(vnode_t vp, decmpfs_cnode *cp, off_t toSize, int truncate_okay, int skiplock)
{
	/* convert a compressed file to an uncompressed file */

	int err = 0;
	char *data = NULL;
	uio_t uio_w = 0;
	off_t offset = 0;
	uint32_t old_state = 0;
	uint32_t new_state = 0;
	int update_file_state = 0;
	size_t allocSize = 0;
	decmpfs_header *hdr = NULL;
	int cmpdata_locked = 0;
	off_t remaining = 0;
	uint64_t uncompressed_size = 0;

	/*
	 * Trace the following parameters on entry with event-id 0x03120000.
	 *
	 * @vp->v_id: vnode-id of the file being decompressed.
	 * @toSize: uncompress given bytes of the file.
	 * @truncate_okay: on error it is OK to truncate.
	 * @skiplock: compressed data is locked, skip locking again.
	 *
	 * Please NOTE: @toSize can overflow in theory but here it is safe.
	 */
	DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_DECOMPRESS_FILE, vp->v_id,
	    (int)toSize, truncate_okay, skiplock);

	if (!skiplock) {
		decmpfs_lock_compressed_data(cp, 1); cmpdata_locked = 1;
	}

decompress:
	old_state = decmpfs_fast_get_state(cp);

	switch (old_state) {
	case FILE_IS_NOT_COMPRESSED:
		/* someone else decompressed the file */
		err = 0;
		goto out;

	case FILE_TYPE_UNKNOWN:
		/* the file is in an unknown state, so update the state and retry */
		(void)decmpfs_file_is_compressed(vp, cp);
		goto decompress;

	case FILE_IS_COMPRESSED:
		/* the file is compressed, so decompress it */
		break;

	default:
		/*
		 * this shouldn't happen since multiple calls to decmpfs_decompress_file lock each other out,
		 * and when decmpfs_decompress_file returns, the state should be always be set back to
		 * FILE_IS_NOT_COMPRESSED or FILE_IS_UNKNOWN
		 */
		err = EINVAL;
		goto out;
	}

	err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0);
	if (err != 0) {
		goto out;
	}

	uncompressed_size = hdr->uncompressed_size;
	if (toSize == -1) {
		toSize = hdr->uncompressed_size;
	}

	if (toSize == 0) {
		/* special case truncating the file to zero bytes */
		goto nodecmp;
	} else if ((uint64_t)toSize > hdr->uncompressed_size) {
		/* the caller is trying to grow the file, so we should decompress all the data */
		toSize = hdr->uncompressed_size;
	}

	allocSize = MIN(64 * 1024, (size_t)toSize);
	MALLOC(data, char *, allocSize, M_TEMP, M_WAITOK);

	uio_w = uio_create(1, 0LL, UIO_SYSSPACE, UIO_WRITE);

	uio_w->uio_flags |= UIO_FLAGS_IS_COMPRESSED_FILE;

	remaining = toSize;

	/* tell the buffer cache that this is an empty file */
	ubc_setsize(vp, 0);

	/* if we got here, we need to decompress the file */
	decmpfs_cnode_set_vnode_state(cp, FILE_IS_CONVERTING, 1);

	while (remaining > 0) {
		/* loop decompressing data from the file and writing it into the data fork */

		uint64_t bytes_read = 0;
		decmpfs_vector vec = { .buf = data, .size = (user_ssize_t)MIN(allocSize, remaining) };
		err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, offset, vec.size, 1, &vec, &bytes_read);
		if (err != 0) {
			ErrorLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err);
			break;
		}

		if (bytes_read == 0) {
			/* we're done reading data */
			break;
		}

		uio_reset(uio_w, offset, UIO_SYSSPACE, UIO_WRITE);
		err = uio_addiov(uio_w, CAST_USER_ADDR_T(data), (user_size_t)bytes_read);
		if (err != 0) {
			ErrorLogWithPath("uio_addiov err %d\n", err);
			break;
		}

		err = VNOP_WRITE(vp, uio_w, 0, decmpfs_ctx);
		if (err != 0) {
			/* if the write failed, truncate the file to zero bytes */
			ErrorLogWithPath("VNOP_WRITE err %d\n", err);
			break;
		}
		offset += bytes_read;
		remaining -= bytes_read;
	}

	if (err == 0) {
		if (offset != toSize) {
			ErrorLogWithPath("file decompressed to %lld instead of %lld\n", offset, toSize);
			err = EINVAL;
		}
	}

	if (err == 0) {
		/* sync the data and metadata */
		err = VNOP_FSYNC(vp, MNT_WAIT, decmpfs_ctx);
		if (err != 0) {
			ErrorLogWithPath("VNOP_FSYNC err %d\n", err);
		}
	}

	if (err != 0) {
		/* write, setattr, or fsync failed */
		ErrorLogWithPath("aborting decompress, err %d\n", err);
		if (truncate_okay) {
			/* truncate anything we might have written */
			int error = vnode_setsize(vp, 0, 0, decmpfs_ctx);
			ErrorLogWithPath("vnode_setsize err %d\n", error);
		}
		goto out;
	}

nodecmp:
	/* if we're truncating the file to zero bytes, we'll skip ahead to here */

	/* unset the compressed flag */
	unset_compressed_flag(vp);

	/* free the compressed data associated with this file */
	err = decmpfs_free_compressed_data(vp, cp);
	if (err != 0) {
		ErrorLogWithPath("decmpfs_free_compressed_data err %d\n", err);
	}

	/*
	 * even if free_compressed_data or vnode_getattr/vnode_setattr failed, return success
	 * since we succeeded in writing all of the file data to the data fork
	 */
	err = 0;

	/* if we got this far, the file was successfully decompressed */
	update_file_state = 1;
	new_state = FILE_IS_NOT_COMPRESSED;

#if COMPRESSION_DEBUG
	{
		uint64_t filesize = 0;
		vnsize(vp, &filesize);
		DebugLogWithPath("new file size %lld\n", filesize);
	}
#endif

out:
	if (hdr != NULL) {
		FREE(hdr, M_TEMP);
	}
	if (data) {
		FREE(data, M_TEMP);
	}
	if (uio_w) {
		uio_free(uio_w);
	}

	if (err != 0) {
		/* if there was a failure, reset compression flags to unknown and clear the buffer cache data */
		update_file_state = 1;
		new_state = FILE_TYPE_UNKNOWN;
		if (uncompressed_size) {
			ubc_setsize(vp, 0);
			ubc_setsize(vp, uncompressed_size);
		}
	}

	if (update_file_state) {
		lck_mtx_lock(decompress_channel_mtx);
		decmpfs_cnode_set_vnode_state(cp, new_state, 1);
		wakeup((caddr_t)&decompress_channel); /* wake up anyone who might have been waiting for decompression */
		lck_mtx_unlock(decompress_channel_mtx);
	}

	if (cmpdata_locked) {
		decmpfs_unlock_compressed_data(cp, 1);
	}
	/*
	 * Trace the following parameters on return with event-id 0x03120000.
	 *
	 * @vp->v_id: vnode-id of the file being decompressed.
	 * @err: value returned from this function.
	 */
	DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_DECOMPRESS_FILE, vp->v_id, err);
	return err;
}
#pragma mark --- Type1 compressor ---

/*
 * The "Type1" compressor stores the data fork directly in the compression xattr
 */
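/*
 * On disk, a Type1 file's com.apple.decmpfs xattr is simply the little-endian
 * decmpfs_disk_header (compression_magic, compression_type, uncompressed_size)
 * followed immediately by the uncompressed file contents, which is why the
 * validate/fetch callbacks below only need to check the attr_size arithmetic
 * and memcpy out of hdr->attr_bytes.
 */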
static int
decmpfs_validate_compressed_file_Type1(__unused vnode_t vp, __unused vfs_context_t ctx, decmpfs_header *hdr)
{
	int err = 0;

	if (hdr->uncompressed_size + sizeof(decmpfs_disk_header) != (uint64_t)hdr->attr_size) {
		err = EINVAL;
	}
	return err;
}

static int
decmpfs_fetch_uncompressed_data_Type1(__unused vnode_t vp, __unused vfs_context_t ctx, decmpfs_header *hdr, off_t offset, user_ssize_t size, int nvec, decmpfs_vector *vec, uint64_t *bytes_read)
{
	int err = 0;
	int i;
	user_ssize_t remaining;

	if (hdr->uncompressed_size + sizeof(decmpfs_disk_header) != (uint64_t)hdr->attr_size) {
		err = EINVAL;
		goto out;
	}

#if COMPRESSION_DEBUG
	static int dummy = 0; // prevent syslog from coalescing printfs
	DebugLogWithPath("%d memcpy %lld at %lld\n", dummy++, size, (uint64_t)offset);
#endif

	remaining = size;
	for (i = 0; (i < nvec) && (remaining > 0); i++) {
		user_ssize_t curCopy = vec[i].size;
		if (curCopy > remaining) {
			curCopy = remaining;
		}
		memcpy(vec[i].buf, hdr->attr_bytes + offset, curCopy);
		offset += curCopy;
		remaining -= curCopy;
	}

	if ((bytes_read) && (err == 0)) {
		*bytes_read = (size - remaining);
	}

out:
	return err;
}
SECURITY_READ_ONLY_EARLY(static decmpfs_registration) Type1Reg =
{
	.decmpfs_registration = DECMPFS_REGISTRATION_VERSION,
	.validate             = decmpfs_validate_compressed_file_Type1,
	.adjust_fetch         = NULL, /* no adjust necessary */
	.fetch                = decmpfs_fetch_uncompressed_data_Type1,
	.free_data            = NULL, /* no free necessary */
	.get_flags            = NULL  /* no flags */
};
#pragma mark --- decmpfs initialization ---

void
decmpfs_init(void)
{
	static int done = 0;
	if (done) {
		return;
	}

	decmpfs_ctx = vfs_context_create(vfs_context_kernel());

	lck_grp_attr_t *attr = lck_grp_attr_alloc_init();
	decmpfs_lockgrp = lck_grp_alloc_init("VFSCOMP", attr);
	lck_grp_attr_free(attr);
	decompressorsLock = lck_rw_alloc_init(decmpfs_lockgrp, NULL);
	decompress_channel_mtx = lck_mtx_alloc_init(decmpfs_lockgrp, NULL);

	register_decmpfs_decompressor(CMP_Type1, &Type1Reg);

	done = 1;
}
#endif /* FS_COMPRESSION */