/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/host.h>

#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/host_info.h>
#include <mach/iocompressionstats_notification_server.h>
#include <mach/mach_host.h>

#include <sys/mount_internal.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>

#include <vfs/vfs_io_compression_stats.h>

#include <vm/vm_compressor_algorithms.h>
#include <vm/vm_protos.h>
int io_compression_stats_enable = 0;
int io_compression_stats_block_size = IO_COMPRESSION_STATS_DEFAULT_BLOCK_SIZE;

#define LZ4_SCRATCH_ALIGN (64)

typedef struct {
	uint8_t lz4state[lz4_encode_scratch_size] __attribute((aligned(LZ4_SCRATCH_ALIGN)));
} lz4_encode_scratch_t;

lz4_encode_scratch_t **per_cpu_scratch_buf;
uint8_t **per_cpu_compression_buf;
uint32_t io_compression_stats_cpu_count;
char *vnpath_scratch_buf;
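
/*
 * Note: the compression scratch state and destination buffers are kept per CPU.
 * iocs_compress_block() below runs with preemption disabled, so the buffers
 * indexed by cpu_number() can be used without additional locking.
 */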
LCK_GRP_DECLARE(io_compression_stats_lckgrp, "io_compression_stats");
LCK_RW_DECLARE(io_compression_stats_lock, &io_compression_stats_lckgrp);
LCK_MTX_DECLARE(iocs_store_buffer_lock, &io_compression_stats_lckgrp);
typedef enum io_compression_stats_allocate_type {
	IO_COMPRESSION_STATS_NEW_ALLOC = 0,
	IO_COMPRESSION_STATS_RESIZE = 1
} io_compression_stats_alloc_type_t;

static void io_compression_stats_deallocate_compression_buffers(void);
struct iocs_store_buffer iocs_store_buffer = {
	.buffer = NULL,
	.current_position = 0,
	.marked_point = 0,
};

int iocs_sb_bytes_since_last_mark = 0;
int iocs_sb_bytes_since_last_notification = 0;
ZONE_DECLARE(io_compression_stats_zone, "io_compression_stats",
    sizeof(struct io_compression_stats), ZC_NOENCRYPT | ZC_NOGC | ZC_ZFREE_CLEARMEM);
static int
io_compression_stats_allocate_compression_buffers(io_compression_stats_alloc_type_t alloc_type, uint32_t block_size)
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	io_compression_stats_cpu_count = hinfo.max_cpus;
	if (alloc_type == IO_COMPRESSION_STATS_NEW_ALLOC) {
		assert(per_cpu_scratch_buf == NULL);
		per_cpu_scratch_buf = kheap_alloc(KHEAP_DEFAULT, sizeof(lz4_encode_scratch_t *) * io_compression_stats_cpu_count, Z_ZERO);
		if (per_cpu_scratch_buf == NULL) {
			goto out;
		}

		assert(per_cpu_compression_buf == NULL);
		per_cpu_compression_buf = kheap_alloc(KHEAP_DEFAULT, sizeof(uint8_t *) * io_compression_stats_cpu_count, Z_ZERO);
		if (per_cpu_compression_buf == NULL) {
			goto out;
		}
	}

	for (uint32_t cpu = 0; cpu < io_compression_stats_cpu_count; cpu++) {
		if (alloc_type == IO_COMPRESSION_STATS_NEW_ALLOC) {
			per_cpu_scratch_buf[cpu] = kheap_alloc(KHEAP_DEFAULT, sizeof(lz4_encode_scratch_t), Z_ZERO);
			if (per_cpu_scratch_buf[cpu] == NULL) {
				goto out;
			}
		} else {
			/* Resize: free the old compression buffer, it is reallocated below at the new block size */
			kheap_free_addr(KHEAP_DEFAULT, per_cpu_compression_buf[cpu]);
		}
		per_cpu_compression_buf[cpu] = kheap_alloc(KHEAP_DEFAULT, block_size, Z_ZERO);
		if (per_cpu_compression_buf[cpu] == NULL) {
			goto out;
		}
	}

	if (alloc_type == IO_COMPRESSION_STATS_NEW_ALLOC) {
		bzero(&iocs_store_buffer, sizeof(struct iocs_store_buffer));
		iocs_store_buffer.buffer = kheap_alloc(KHEAP_DEFAULT, IOCS_STORE_BUFFER_SIZE, Z_ZERO);
		if (iocs_store_buffer.buffer == NULL) {
			goto out;
		}
		iocs_store_buffer.current_position = 0;
		iocs_store_buffer.marked_point = 0;

		assert(vnpath_scratch_buf == NULL);
		vnpath_scratch_buf = kheap_alloc(KHEAP_DEFAULT, MAXPATHLEN, Z_ZERO);
		if (vnpath_scratch_buf == NULL) {
			goto out;
		}
	}

	return 0;

out:
	/* In case of any error, irrespective of whether it is new alloc or resize,
	 * deallocate all buffers and fail */
	io_compression_stats_deallocate_compression_buffers();
	return ENOMEM;
}
static void
io_compression_stats_deallocate_compression_buffers()
{
	uint32_t cpu;

	if (per_cpu_compression_buf != NULL) {
		for (cpu = 0; cpu < io_compression_stats_cpu_count; cpu++) {
			if (per_cpu_compression_buf[cpu] != NULL) {
				kheap_free_addr(KHEAP_DEFAULT, per_cpu_compression_buf[cpu]);
				per_cpu_compression_buf[cpu] = NULL;
			}
		}
		kheap_free_addr(KHEAP_DEFAULT, per_cpu_compression_buf);
		per_cpu_compression_buf = NULL;
	}

	if (per_cpu_scratch_buf != NULL) {
		for (cpu = 0; cpu < io_compression_stats_cpu_count; cpu++) {
			if (per_cpu_scratch_buf[cpu] != NULL) {
				kheap_free_addr(KHEAP_DEFAULT, per_cpu_scratch_buf[cpu]);
				per_cpu_scratch_buf[cpu] = NULL;
			}
		}
		kheap_free_addr(KHEAP_DEFAULT, per_cpu_scratch_buf);
		per_cpu_scratch_buf = NULL;
	}

	if (iocs_store_buffer.buffer != NULL) {
		kheap_free_addr(KHEAP_DEFAULT, iocs_store_buffer.buffer);
		bzero(&iocs_store_buffer, sizeof(struct iocs_store_buffer));
	}

	iocs_sb_bytes_since_last_mark = 0;
	iocs_sb_bytes_since_last_notification = 0;

	if (vnpath_scratch_buf != NULL) {
		kheap_free_addr(KHEAP_DEFAULT, vnpath_scratch_buf);
		vnpath_scratch_buf = NULL;
	}
}
static int
sysctl_io_compression_stats_enable SYSCTL_HANDLER_ARGS
{
#pragma unused (arg1, arg2, oidp)

	int error = 0;
	int enable = 0;

	error = SYSCTL_OUT(req, &io_compression_stats_enable, sizeof(int));

	if (error || !req->newptr) {
		return error;
	}

	error = SYSCTL_IN(req, &enable, sizeof(int));
	if (error) {
		return error;
	}

	if (!((enable == 1) || (enable == 0))) {
		return EINVAL;
	}

	lck_rw_lock_exclusive(&io_compression_stats_lock);
	lck_mtx_lock(&iocs_store_buffer_lock);

	if ((io_compression_stats_enable == 0) && (enable == 1)) {
		/* Enabling collection of stats. Allocate appropriate buffers */
		error = io_compression_stats_allocate_compression_buffers(IO_COMPRESSION_STATS_NEW_ALLOC, io_compression_stats_block_size);
		if (!error) {
			io_compression_stats_enable = enable;
			io_compression_stats_dbg("SUCCESS: setting io_compression_stats_enable to %d", io_compression_stats_enable);
		} else {
			io_compression_stats_dbg("FAILED: setting io_compression_stats_enable to %d", io_compression_stats_enable);
		}
	} else if ((io_compression_stats_enable == 1) && (enable == 0)) {
		io_compression_stats_deallocate_compression_buffers();
		io_compression_stats_enable = 0;
		io_compression_stats_dbg("SUCCESS: setting io_compression_stats_enable to %d", io_compression_stats_enable);
	}

	lck_mtx_unlock(&iocs_store_buffer_lock);
	lck_rw_unlock_exclusive(&io_compression_stats_lock);

	return error;
}
SYSCTL_PROC(_vfs, OID_AUTO, io_compression_stats_enable, CTLTYPE_INT | CTLFLAG_RW, 0, 0, &sysctl_io_compression_stats_enable, "I", "");
static int
sysctl_io_compression_block_size SYSCTL_HANDLER_ARGS
{
#pragma unused (arg1, arg2, oidp)

	int error = 0;
	int block_size = io_compression_stats_block_size;

	error = SYSCTL_OUT(req, &block_size, sizeof(int));

	if (error || !req->newptr) {
		return error;
	}

	error = SYSCTL_IN(req, &block_size, sizeof(int));
	if (error) {
		return error;
	}

	if (block_size < IO_COMPRESSION_STATS_MIN_BLOCK_SIZE || block_size > IO_COMPRESSION_STATS_MAX_BLOCK_SIZE) {
		return EINVAL;
	}

	lck_rw_lock_exclusive(&io_compression_stats_lock);

	if (io_compression_stats_block_size != block_size) {
		if (io_compression_stats_enable == 1) {
			/* IO compression stats is enabled, reallocate buffers. */
			error = io_compression_stats_allocate_compression_buffers(IO_COMPRESSION_STATS_RESIZE, block_size);
			if (!error) {
				io_compression_stats_block_size = block_size;
				io_compression_stats_dbg("SUCCESS: setting io_compression_stats_block_size to %d", io_compression_stats_block_size);
			} else {
				/* Failed to allocate buffers, disable IO compression stats */
				io_compression_stats_enable = 0;
				io_compression_stats_dbg("Failed: setting io_compression_stats_block_size to %d", io_compression_stats_block_size);
			}
		} else {
			/* IO compression stats is disabled, only set the io_compression_stats_block_size */
			io_compression_stats_block_size = block_size;
			io_compression_stats_dbg("SUCCESS: setting io_compression_stats_block_size to %d", io_compression_stats_block_size);
		}
	}

	lck_rw_unlock_exclusive(&io_compression_stats_lock);

	return error;
}
SYSCTL_PROC(_vfs, OID_AUTO, io_compression_stats_block_size, CTLTYPE_INT | CTLFLAG_RW, 0, 0, &sysctl_io_compression_block_size, "I", "");
static int
iocs_compress_block(uint8_t *block_ptr, uint32_t block_size)
{
	disable_preemption();

	uint32_t current_cpu = cpu_number();
	if (!(current_cpu < io_compression_stats_cpu_count)) {
		enable_preemption();
		return -1;
	}

	lz4_encode_scratch_t *scratch_buf = per_cpu_scratch_buf[current_cpu];
	uint8_t *dest_buf = per_cpu_compression_buf[current_cpu];

	int compressed_block_size = (int) lz4raw_encode_buffer(dest_buf, block_size,
	    block_ptr, block_size, (lz4_hash_entry_t *) scratch_buf);

	enable_preemption();

	return compressed_block_size;
}
/*
 * Compress buf in chunks of io_compression_stats_block_size
 */
static uint32_t
iocs_compress_buffer(vnode_t vn, uint8_t *buf_ptr, uint32_t buf_size)
{
	uint32_t offset;
	uint32_t compressed_size = 0;
	int block_size = io_compression_stats_block_size;
	int block_stats_scaling_factor = block_size / IOCS_BLOCK_NUM_SIZE_BUCKETS;

	for (offset = 0; offset < buf_size; offset += block_size) {
		int current_block_size = min(block_size, buf_size - offset);
		int current_compressed_block_size = iocs_compress_block(buf_ptr + offset, current_block_size);

		if (current_compressed_block_size == 0) {
			/* Block did not compress: account for it at its full size */
			compressed_size += current_block_size;
			vnode_updateiocompressionblockstats(vn, current_block_size / block_stats_scaling_factor);
		} else if (current_compressed_block_size != -1) {
			compressed_size += current_compressed_block_size;
			vnode_updateiocompressionblockstats(vn, current_compressed_block_size / block_stats_scaling_factor);
		}
	}

	return compressed_size;
}
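
/*
 * Example (illustrative): with io_compression_stats_block_size = 4096, a 10000-byte
 * buffer is compressed as three blocks of 4096, 4096 and 1808 bytes; compressed_size
 * is the sum of the three per-block results, with any incompressible block counted
 * at its full block size.
 */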
static uint32_t
log2down(uint32_t x)
{
	return 31 - __builtin_clz(x);
}

/*
 * Once we get the IO compression stats for the entire buffer, we update buffer_size_compressibility_dist,
 * which helps us observe the distribution across various IO sizes and compression factors.
 * The goal of the next two functions is to get the index into this buffer_size_compressibility_dist table.
 */

/*
 * Maps IO size to a bucket between 0 - IOCS_BUFFER_MAX_BUCKET:
 * size <= 4096 returns 0 and size > 1MB returns IOCS_BUFFER_MAX_BUCKET (9).
 * For IO sizes in between we arrive at the index based on the log2 function:
 * sizes 4097 - 8192 => index = 1,
 * sizes 8193 - 16384 => index = 2, and so on.
 */
#define SIZE_COMPRESSION_DIST_SIZE_BUCKET_MIN 4096
#define SIZE_COMPRESSION_DIST_SIZE_BUCKET_MAX (1024 * 1024)
static uint32_t
get_buffer_size_bucket(uint32_t size)
{
	if (size <= SIZE_COMPRESSION_DIST_SIZE_BUCKET_MIN) {
		return 0;
	}
	if (size > SIZE_COMPRESSION_DIST_SIZE_BUCKET_MAX) {
		return IOCS_BUFFER_MAX_BUCKET;
	}
#define IOCS_INDEX_MAP_OFFSET 11
	return log2down(size - 1) - IOCS_INDEX_MAP_OFFSET;
}
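
/*
 * Example (illustrative): a 20KB IO falls in the 16385 - 32768 range, so
 * get_buffer_size_bucket(20480) = log2down(20479) - 11 = 14 - 11 = 3.
 */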
/*
 * Maps compression factor to a bucket between 0 - IO_COMPRESSION_STATS_MAX_COMPRESSION_BUCKET.
 */
static uint32_t
get_buffer_compressibility_bucket(uint32_t uncompressed_size, uint32_t compressed_size)
{
	int saved_space_pc = (uncompressed_size - compressed_size) * 100 / uncompressed_size;

	if (saved_space_pc < 0) {
		return 0;
	}

	/* saved_space_pc lies between 0 - 100. log2(saved_space_pc) lies between 0 - 6 */
	return log2down(saved_space_pc);
}
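
/*
 * Example (illustrative): a 64KB buffer that compresses to 16KB saves 75% of the space,
 * so get_buffer_compressibility_bucket(65536, 16384) = log2down(75) = 6.
 */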
void
io_compression_stats(buf_t bp)
{
	uint8_t *buf_ptr = NULL;
	int bflags = bp->b_flags;
	uint32_t compressed_size = 0;
	uint32_t buf_cnt = buf_count(bp);
	uint64_t duration = 0;
	caddr_t vaddr = NULL;
	vnode_t vn = buf_vnode(bp);
	int err = 0;

	if ((io_compression_stats_enable != 1) || (bflags & B_READ) || (buf_cnt <= 0)) {
		return;
	}

	if (!lck_rw_try_lock_shared(&io_compression_stats_lock)) {
		/* sysctl modifying IO compression stats parameters is in progress.
		 * Don't block, since malloc might be in progress. */
		return;
	}
	/* re-check io_compression_stats_enable with lock */
	if (io_compression_stats_enable != 1) {
		goto out;
	}

	err = buf_map(bp, &vaddr);
	if (!err) {
		buf_ptr = (uint8_t *) vaddr;
	}

	if (buf_ptr != NULL) {
		int64_t start = mach_absolute_time();
		compressed_size = iocs_compress_buffer(vn, buf_ptr, buf_cnt);
		absolutetime_to_nanoseconds(mach_absolute_time() - start, &duration);

		if (compressed_size != 0) {
			vnode_updateiocompressionbufferstats(vn, buf_cnt, compressed_size,
			    get_buffer_size_bucket(buf_cnt),
			    get_buffer_compressibility_bucket(buf_cnt, compressed_size));
		}
	}

	KDBG_RELEASE(FSDBG_CODE(DBG_VFS, DBG_VFS_IO_COMPRESSION_STATS) | DBG_FUNC_NONE,
	    duration, io_compression_stats_block_size, compressed_size, buf_cnt, 0);

out:
	lck_rw_unlock_shared(&io_compression_stats_lock);
	if (buf_ptr != NULL) {
		buf_unmap(bp);
	}
}
static void
iocs_notify_user(void)
{
	mach_port_t user_port = MACH_PORT_NULL;
	kern_return_t kr = host_get_iocompressionstats_port(host_priv_self(), &user_port);
	if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
		return;
	}
	iocompressionstats_notification(user_port, 0);
	ipc_port_release_send(user_port);
}
static void
construct_iocs_sbe_from_vnode(struct vnode *vp, struct iocs_store_buffer_entry *iocs_sbe)
{
	int path_len = MAXPATHLEN;

	vn_getpath(vp, vnpath_scratch_buf, &path_len);
	/*
	 * Total path length is path_len, and we can copy out IOCS_SBE_PATH_LEN bytes. We are interested
	 * in the first segment of the path to try and figure out the process writing to the file, and we are
	 * interested in the last segment to figure out the extension. So, in cases where
	 * IOCS_SBE_PATH_LEN < path_len, let's copy out the first IOCS_PATH_START_BYTES_TO_COPY bytes and the
	 * last IOCS_PATH_END_BYTES_TO_COPY bytes (the last segment includes the null character).
	 */
	if (path_len > IOCS_SBE_PATH_LEN) {
		strncpy(iocs_sbe->path_name, vnpath_scratch_buf, IOCS_PATH_START_BYTES_TO_COPY);
		strncpy(iocs_sbe->path_name + IOCS_PATH_START_BYTES_TO_COPY,
		    vnpath_scratch_buf + path_len - IOCS_PATH_END_BYTES_TO_COPY,
		    IOCS_PATH_END_BYTES_TO_COPY);
	} else {
		strncpy(iocs_sbe->path_name, vnpath_scratch_buf, IOCS_SBE_PATH_LEN);
	}
	memcpy(&iocs_sbe->iocs, vp->io_compression_stats, sizeof(struct io_compression_stats));
}
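
/*
 * Illustrative example (the path and the byte counts are hypothetical, the actual
 * IOCS_PATH_START/END_BYTES_TO_COPY values are defined elsewhere): if both constants
 * were 16, a long path such as "/private/var/mobile/.../database.sqlite" would be
 * recorded as its first 16 bytes followed by its last 16 bytes ("database.sqlite"
 * plus the terminating NUL), keeping both the leading directory and the file extension.
 */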
void
vnode_iocs_record_and_free(struct vnode *vp)
{
	int notify = 0;
	struct iocs_store_buffer_entry *iocs_sbe = NULL;

	if (!lck_mtx_try_lock(&iocs_store_buffer_lock)) {
		goto out;
	}

	if (iocs_store_buffer.buffer == NULL) {
		goto release;
	}

	assert(iocs_store_buffer.current_position + sizeof(struct iocs_store_buffer_entry) <= IOCS_STORE_BUFFER_SIZE);

	iocs_sbe = (struct iocs_store_buffer_entry *)(iocs_store_buffer.buffer + iocs_store_buffer.current_position);

	construct_iocs_sbe_from_vnode(vp, iocs_sbe);

	iocs_store_buffer.current_position += sizeof(struct iocs_store_buffer_entry);

	if (iocs_store_buffer.current_position + sizeof(struct iocs_store_buffer_entry) > IOCS_STORE_BUFFER_SIZE) {
		/* We've reached the end of the buffer, move back to the top */
		iocs_store_buffer.current_position = 0;
	}

	iocs_sb_bytes_since_last_mark += sizeof(struct iocs_store_buffer_entry);
	iocs_sb_bytes_since_last_notification += sizeof(struct iocs_store_buffer_entry);

	if ((iocs_sb_bytes_since_last_mark > IOCS_STORE_BUFFER_NOTIFY_AT) &&
	    (iocs_sb_bytes_since_last_notification > IOCS_STORE_BUFFER_NOTIFICATION_INTERVAL)) {
		notify = 1;
		iocs_sb_bytes_since_last_notification = 0;
	}

release:
	lck_mtx_unlock(&iocs_store_buffer_lock);

out:
	/* We need to free io_compression_stats whether or not we were able to record it */
	bzero(vp->io_compression_stats, sizeof(struct io_compression_stats));
	zfree(io_compression_stats_zone, vp->io_compression_stats);
	vp->io_compression_stats = NULL;

	if (notify) {
		iocs_notify_user();
	}
}
struct vnode_iocs_context {
	struct sysctl_req *addr;
	int current_ptr;
};

static int
vnode_iocs_callback(struct vnode *vp, void *vctx)
{
	struct vnode_iocs_context *ctx = vctx;
	struct sysctl_req *req = ctx->addr;
	int current_ptr = ctx->current_ptr;

	if (current_ptr + sizeof(struct iocs_store_buffer_entry) < req->oldlen) {
		if (vp->io_compression_stats != NULL) {
			construct_iocs_sbe_from_vnode(vp, (struct iocs_store_buffer_entry *) (req->oldptr + current_ptr));
			current_ptr += sizeof(struct iocs_store_buffer_entry);
		}
	} else {
		return VNODE_RETURNED_DONE;
	}
	ctx->current_ptr = current_ptr;

	return VNODE_RETURNED;
}

static int
vfs_iocs_callback(mount_t mp, void *arg)
{
	if (mp->mnt_flag & MNT_LOCAL) {
		vnode_iterate(mp, VNODE_ITERATE_ALL, vnode_iocs_callback, arg);
	}

	return VFS_RETURNED;
}
extern long numvnodes;

static int
sysctl_io_compression_dump_stats SYSCTL_HANDLER_ARGS
{
#pragma unused (arg1, arg2, oidp)

	int error = 0;
	uint32_t inp_flag = 0;
	int ret_len = 0;

	if (io_compression_stats_enable == 0) {
		error = EINVAL;
		goto out;
	}

	if ((req->newptr != USER_ADDR_NULL) && (req->newlen == sizeof(uint32_t))) {
		error = SYSCTL_IN(req, &inp_flag, sizeof(uint32_t));
		if (error) {
			goto out;
		}
		switch (inp_flag) {
		case IOCS_SYSCTL_LIVE:
		case IOCS_SYSCTL_STORE_BUFFER_RD_ONLY:
		case IOCS_SYSCTL_STORE_BUFFER_MARK:
			break;
		default:
			error = EINVAL;
			goto out;
		}
	} else {
		error = EINVAL;
		goto out;
	}

	if (req->oldptr == USER_ADDR_NULL) {
		/* Query to figure out size of the buffer */
		if (inp_flag & IOCS_SYSCTL_LIVE) {
			req->oldidx = numvnodes * sizeof(struct iocs_store_buffer_entry);
		} else {
			/* Buffer size for archived case, let's keep it
			 * simple and return IOCS store buffer size */
			req->oldidx = IOCS_STORE_BUFFER_SIZE;
		}
		goto out;
	}

	if (inp_flag & IOCS_SYSCTL_LIVE) {
		struct vnode_iocs_context ctx;

		bzero(&ctx, sizeof(struct vnode_iocs_context));
		ctx.addr = req;
		vfs_iterate(0, vfs_iocs_callback, &ctx);
		req->oldidx = ctx.current_ptr;
		goto out;
	}

	/* reading from store buffer */
	lck_mtx_lock(&iocs_store_buffer_lock);

	if (iocs_store_buffer.buffer == NULL) {
		error = EINVAL;
		goto release;
	}
	if (iocs_sb_bytes_since_last_mark == 0) {
		req->oldidx = 0;
		goto release;
	}

	int expected_size = 0;
	/* Dry run to figure out amount of space required to copy out the
	 * iocs_store_buffer.buffer */
	if (iocs_store_buffer.marked_point < iocs_store_buffer.current_position) {
		expected_size = iocs_store_buffer.current_position - iocs_store_buffer.marked_point;
	} else {
		expected_size = IOCS_STORE_BUFFER_SIZE - iocs_store_buffer.marked_point;
		expected_size += iocs_store_buffer.current_position;
	}

	if (req->oldlen < expected_size) {
		error = ENOMEM;
		goto release;
	}

	if (iocs_store_buffer.marked_point < iocs_store_buffer.current_position) {
		error = copyout(iocs_store_buffer.buffer + iocs_store_buffer.marked_point,
		    req->oldptr,
		    iocs_store_buffer.current_position - iocs_store_buffer.marked_point);
		if (error) {
			goto release;
		}
		ret_len = iocs_store_buffer.current_position - iocs_store_buffer.marked_point;
	} else {
		/* The store buffer has wrapped around: copy out the tail (marked_point to the
		 * end of the buffer) followed by the head (start of the buffer to current_position) */
		error = copyout(iocs_store_buffer.buffer + iocs_store_buffer.marked_point,
		    req->oldptr,
		    IOCS_STORE_BUFFER_SIZE - iocs_store_buffer.marked_point);
		if (error) {
			goto release;
		}
		ret_len = IOCS_STORE_BUFFER_SIZE - iocs_store_buffer.marked_point;

		error = copyout(iocs_store_buffer.buffer,
		    req->oldptr + ret_len,
		    iocs_store_buffer.current_position);
		if (error) {
			goto release;
		}
		ret_len += iocs_store_buffer.current_position;
	}

	req->oldidx = ret_len;
	if ((ret_len != 0) && (inp_flag & IOCS_SYSCTL_STORE_BUFFER_MARK)) {
		iocs_sb_bytes_since_last_mark = 0;
		iocs_store_buffer.marked_point = iocs_store_buffer.current_position;
	}

release:
	lck_mtx_unlock(&iocs_store_buffer_lock);

out:
	return error;
}
SYSCTL_PROC(_vfs, OID_AUTO, io_compression_dump_stats, CTLFLAG_WR | CTLTYPE_NODE, 0, 0, sysctl_io_compression_dump_stats, "-", "");
int
vnode_updateiocompressionblockstats(vnode_t vp, uint32_t size_bucket)
{
	if (size_bucket >= IOCS_BLOCK_NUM_SIZE_BUCKETS) {
		return EINVAL;
	}

	if (vp->io_compression_stats == NULL) {
		/* Allocate and zero-fill the per-vnode stats on first use */
		io_compression_stats_t iocs = (io_compression_stats_t)zalloc_flags(io_compression_stats_zone, Z_ZERO);
		if (iocs == NULL) {
			return ENOMEM;
		}
		vnode_lock_spin(vp);
		/* Re-check with lock */
		if (vp->io_compression_stats == NULL) {
			vp->io_compression_stats = iocs;
		} else {
			/* Another thread won the race: free our copy */
			zfree(io_compression_stats_zone, iocs);
		}
		vnode_unlock(vp);
	}
	OSIncrementAtomic((SInt32 *)&vp->io_compression_stats->block_compressed_size_dist[size_bucket]);

	return 0;
}
int
vnode_updateiocompressionbufferstats(__unused vnode_t vp, __unused uint64_t uncompressed_size, __unused uint64_t compressed_size, __unused uint32_t size_bucket, __unused uint32_t compression_bucket)
{
	/* vnode_updateiocompressionblockstats will always be called before vnode_updateiocompressionbufferstats.
	 * Hence vp->io_compression_stats should already be allocated */
	if (vp->io_compression_stats == NULL) {
		return EINVAL;
	}

	if ((size_bucket >= IOCS_BUFFER_NUM_SIZE_BUCKETS) || (compression_bucket >= IOCS_BUFFER_NUM_COMPRESSION_BUCKETS)) {
		return EINVAL;
	}

	OSAddAtomic64(uncompressed_size, &vp->io_compression_stats->uncompressed_size);
	OSAddAtomic64(compressed_size, &vp->io_compression_stats->compressed_size);

	OSIncrementAtomic((SInt32 *)&vp->io_compression_stats->buffer_size_compression_dist[size_bucket][compression_bucket]);

	return 0;
}