* Copyright (c) 2010-2016 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/* This module implements a hybrid/adaptive compression scheme, using WKdm where
 * profitable and, currently, an LZ4 variant elsewhere.
 */
#define VMDBGSTATS (DEBUG)
#if VMDBGSTATS
-#define VM_COMPRESSOR_STAT_DBG(x...) \
- do { \
- (x); \
+#define VM_COMPRESSOR_STAT_DBG(x...) \
+ do { \
+ (x); \
} while(0)
#else
-#define VM_COMPRESSOR_STAT_DBG(x...) \
- do { \
+#define VM_COMPRESSOR_STAT_DBG(x...) \
+ do { \
} while (0)
#endif
#define VMCSTATS (DEVELOPMENT || DEBUG)
#if VMCSTATS
-#define VM_COMPRESSOR_STAT(x...) \
- do { \
- (x); \
+#define VM_COMPRESSOR_STAT(x...) \
+ do { \
+ (x); \
} while(0)
//TODO make atomic where needed, decompression paths (see the hedged sketch after the #endif below)
-#define VM_DECOMPRESSOR_STAT(x...) \
- do { \
- (x); \
+#define VM_DECOMPRESSOR_STAT(x...) \
+ do { \
+ (x); \
} while(0)
#else
-#define VM_COMPRESSOR_STAT(x...) \
- do { \
+#define VM_COMPRESSOR_STAT(x...) \
+ do { \
} while (0)
-#define VM_DECOMPRESSOR_STAT(x...) \
- do { \
+#define VM_DECOMPRESSOR_STAT(x...) \
+ do { \
} while (0)
#endif
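/*
 * Hedged sketch for the TODO above (illustration only, not part of this diff):
 * decompression-path counters could be bumped with relaxed C11/Clang atomic
 * builtins so concurrent decompressors do not lose updates. The macro name
 * below is hypothetical.
 */
#if 0 /* illustration only */
#define VM_DECOMPRESSOR_STAT_ATOMIC(field, delta)                        \
	do {                                                             \
		__atomic_fetch_add(&(field), (delta), __ATOMIC_RELAXED); \
	} while (0)
/* e.g. VM_DECOMPRESSOR_STAT_ATOMIC(compressor_stats.wk_decompressions, 1); */
#endif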
-static inline enum compressor_preselect_t compressor_preselect(void) {
+static inline enum compressor_preselect_t
+compressor_preselect(void)
+{
if (vmcstate.lz4_failure_skips >= vmctune.lz4_max_failure_skips) {
vmcstate.lz4_failure_skips = 0;
vmcstate.lz4_failure_run_length = 0;
return CPRESELWK;
}
-static inline void compressor_selector_update(int lz4sz, int didwk, int wksz) {
+static inline void
+compressor_selector_update(int lz4sz, int didwk, int wksz)
+{
VM_COMPRESSOR_STAT(compressor_stats.lz4_compressions++);
if (lz4sz == 0) {
- VM_COMPRESSOR_STAT(compressor_stats.lz4_compressed_bytes+=PAGE_SIZE);
+ VM_COMPRESSOR_STAT(compressor_stats.lz4_compressed_bytes += PAGE_SIZE);
VM_COMPRESSOR_STAT(compressor_stats.lz4_compression_failures++);
vmcstate.lz4_failure_run_length++;
VM_COMPRESSOR_STAT(vmcstate.lz4_total_failures++);
} else {
vmcstate.lz4_failure_run_length = 0;
- VM_COMPRESSOR_STAT(compressor_stats.lz4_compressed_bytes+=lz4sz);
+ VM_COMPRESSOR_STAT(compressor_stats.lz4_compressed_bytes += lz4sz);
if (lz4sz <= vmctune.wkdm_reeval_threshold) {
vmcstate.lz4_run_length = 0;
if (didwk) {
if (__probable(wksz > lz4sz)) {
uint32_t lz4delta = wksz - lz4sz;
- VM_COMPRESSOR_STAT(compressor_stats.lz4_wk_compression_delta+=lz4delta);
+ VM_COMPRESSOR_STAT(compressor_stats.lz4_wk_compression_delta += lz4delta);
if (lz4delta >= vmctune.lz4_run_continue_bytes) {
vmcstate.lz4_run_length++;
} else if (lz4delta <= vmctune.lz4_profitable_bytes) {
vmcstate.lz4_run_length = 0;
}
} else {
- VM_COMPRESSOR_STAT(compressor_stats.lz4_wk_compression_negative_delta+=(lz4sz-wksz));
+ VM_COMPRESSOR_STAT(compressor_stats.lz4_wk_compression_negative_delta += (lz4sz - wksz));
vmcstate.lz4_failure_run_length++;
VM_COMPRESSOR_STAT(vmcstate.lz4_total_negatives++);
vmcstate.lz4_run_length = 0;
}
-static inline void WKdm_hv(uint32_t *wkbuf) {
+static inline void
+WKdm_hv(uint32_t *wkbuf)
+{
#if DEVELOPMENT || DEBUG
uint32_t *inw = (uint32_t *) wkbuf;
if (*inw != MZV_MAGIC) {
if ((*inw | *(inw + 1) | *(inw + 2)) & 0xFFFF0000) {
- panic("WKdm(%p): invalid header 0x%x 0x%x 0x%x\n", wkbuf, *inw, *(inw +1), *(inw+2));
+ panic("WKdm(%p): invalid header 0x%x 0x%x 0x%x\n", wkbuf, *inw, *(inw + 1), *(inw + 2));
}
}
#else /* DEVELOPMENT || DEBUG */
#if defined(__arm64__)
#endif
-static inline void WKdmD(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, unsigned int bytes) {
+static inline bool
+WKdmD(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, unsigned int bytes,
+ __unused uint32_t *pop_count)
+{
#if defined(__arm64__)
#endif
WKdm_hv(src_buf);
#else /* !defined arm64 */
WKdm_decompress_new(src_buf, dest_buf, scratch, bytes);
#endif
+ return true;
}
#if DEVELOPMENT || DEBUG
int precompy, wkswhw;
#endif
-static inline int WKdmC(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, boolean_t *incomp_copy, unsigned int limit) {
+static inline int
+WKdmC(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch,
+ boolean_t *incomp_copy, unsigned int limit, __unused uint32_t *pop_count)
+{
(void)incomp_copy;
int wkcval;
#if defined(__arm64__)
}
-int metacompressor(const uint8_t *in, uint8_t *cdst, int32_t outbufsz, uint16_t *codec, void *cscratchin, boolean_t *incomp_copy) {
+int
+metacompressor(const uint8_t *in, uint8_t *cdst, int32_t outbufsz, uint16_t *codec,
+ void *cscratchin, boolean_t *incomp_copy, uint32_t *pop_count_p)
+{
int sz = -1;
int dowk = FALSE, dolz4 = FALSE, skiplz4 = FALSE;
int insize = PAGE_SIZE;
compressor_encode_scratch_t *cscratch = cscratchin;
+ /* Not all paths lead to an inline population count. */
+ uint32_t pop_count = C_SLOT_NO_POPCOUNT;
if (vm_compressor_current_codec == CMODE_WK) {
dowk = TRUE;
if (dowk) {
*codec = CCWK;
VM_COMPRESSOR_STAT(compressor_stats.wk_compressions++);
- sz = WKdmC(in, cdst, &cscratch->wkscratch[0], incomp_copy, outbufsz);
+ sz = WKdmC(in, cdst, &cscratch->wkscratch[0], incomp_copy, outbufsz, &pop_count);
if (sz == -1) {
- VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total+=PAGE_SIZE);
+ VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total += PAGE_SIZE);
VM_COMPRESSOR_STAT(compressor_stats.wk_compression_failures++);
if (vm_compressor_current_codec == CMODE_HYB) {
goto cexit;
} else if (sz == 0) {
VM_COMPRESSOR_STAT(compressor_stats.wk_sv_compressions++);
- VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total+=4);
+ VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total += 4);
} else {
- VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total+=sz);
+ VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total += sz);
}
}
lz4eval:
int wkc = (sz == -1) ? PAGE_SIZE : sz;
#endif
VM_COMPRESSOR_STAT(compressor_stats.wk_compressions_exclusive++);
- VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_exclusive+=wkc);
+ VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_exclusive += wkc);
goto cexit;
}
}
}
}
cexit:
+ assert(pop_count_p != NULL);
+ *pop_count_p = pop_count;
return sz;
}
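/*
 * Hedged usage sketch (hypothetical caller, illustration only, not part of this
 * diff): with the new signature above, the caller also supplies a pop_count
 * out-parameter; metacompressor() stores into it at cexit, and
 * C_SLOT_NO_POPCOUNT indicates that no inline population count was produced
 * on the path taken.
 */
#if 0 /* illustration only */
static int
compress_page_sketch(const uint8_t *page, uint8_t *cdst, int32_t outbufsz, void *cscratch)
{
	uint16_t codec = CCWK;
	boolean_t incomp_copy = FALSE;
	uint32_t pop_count = C_SLOT_NO_POPCOUNT;
	/* sz == -1 means the page did not compress within outbufsz; the codec actually used is returned in 'codec' */
	int sz = metacompressor(page, cdst, outbufsz, &codec, cscratch, &incomp_copy, &pop_count);
	return sz;
}
#endif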
-void metadecompressor(const uint8_t *source, uint8_t *dest, uint32_t csize, uint16_t ccodec, void *compressor_dscratchin) {
+bool
+metadecompressor(const uint8_t *source, uint8_t *dest, uint32_t csize,
+ uint16_t ccodec, void *compressor_dscratchin, uint32_t *pop_count_p)
+{
int dolz4 = (ccodec == CCLZ4);
int rval;
compressor_decode_scratch_t *compressor_dscratch = compressor_dscratchin;
-
+ /* Not all paths lead to an inline population count. */
+ uint32_t pop_count = C_SLOT_NO_POPCOUNT;
+ bool success;
+
if (dolz4) {
rval = (int)lz4raw_decode_buffer(dest, PAGE_SIZE, source, csize, &compressor_dscratch->lz4decodestate[0]);
- VM_DECOMPRESSOR_STAT(compressor_stats.lz4_decompressions+=1);
- VM_DECOMPRESSOR_STAT(compressor_stats.lz4_decompressed_bytes+=csize);
+ VM_DECOMPRESSOR_STAT(compressor_stats.lz4_decompressions += 1);
+ VM_DECOMPRESSOR_STAT(compressor_stats.lz4_decompressed_bytes += csize);
#if DEVELOPMENT || DEBUG
uint32_t *d32 = dest;
#endif
assertf(rval == PAGE_SIZE, "LZ4 decode: size != pgsize %d, header: 0x%x, 0x%x, 0x%x",
- rval, *d32, *(d32+1), *(d32+2));
+ rval, *d32, *(d32 + 1), *(d32 + 2));
+ success = (rval == PAGE_SIZE);
} else {
assert(ccodec == CCWK);
- WKdmD(source, dest, &compressor_dscratch->wkdecompscratch[0], csize);
+ success = WKdmD(source, dest, &compressor_dscratch->wkdecompscratch[0], csize, &pop_count);
- VM_DECOMPRESSOR_STAT(compressor_stats.wk_decompressions+=1);
- VM_DECOMPRESSOR_STAT(compressor_stats.wk_decompressed_bytes+=csize);
+ VM_DECOMPRESSOR_STAT(compressor_stats.wk_decompressions += 1);
+ VM_DECOMPRESSOR_STAT(compressor_stats.wk_decompressed_bytes += csize);
}
+
+ assert(pop_count_p != NULL);
+ *pop_count_p = pop_count;
+ return success;
}
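/*
 * Hedged usage sketch (hypothetical caller, illustration only, not part of this
 * diff): metadecompressor() now reports success as a bool and fills a pop_count
 * out-parameter, with C_SLOT_NO_POPCOUNT meaning the codec did not compute an
 * inline population count.
 */
#if 0 /* illustration only */
static bool
decompress_slot_sketch(const uint8_t *csrc, uint8_t *page, uint32_t csize,
    uint16_t codec, void *dscratch)
{
	uint32_t pop_count = C_SLOT_NO_POPCOUNT;
	bool ok = metadecompressor(csrc, page, csize, codec, dscratch, &pop_count);
	if (ok && pop_count != C_SLOT_NO_POPCOUNT) {
		/* a caller could validate pop_count against the value recorded at compression time */
	}
	return ok;
}
#endif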
#pragma clang diagnostic pop
-uint32_t vm_compressor_get_encode_scratch_size(void) {
+uint32_t
+vm_compressor_get_encode_scratch_size(void)
+{
if (vm_compressor_current_codec != VM_COMPRESSOR_DEFAULT_CODEC) {
return MAX(sizeof(compressor_encode_scratch_t), WKdm_SCRATCH_BUF_SIZE_INTERNAL);
} else {
}
}
-uint32_t vm_compressor_get_decode_scratch_size(void) {
+uint32_t
+vm_compressor_get_decode_scratch_size(void)
+{
if (vm_compressor_current_codec != VM_COMPRESSOR_DEFAULT_CODEC) {
return MAX(sizeof(compressor_decode_scratch_t), WKdm_SCRATCH_BUF_SIZE_INTERNAL);
} else {
}
-int vm_compressor_algorithm(void) {
+int
+vm_compressor_algorithm(void)
+{
return vm_compressor_current_codec;
}
-void vm_compressor_algorithm_init(void) {
+void
+vm_compressor_algorithm_init(void)
+{
vm_compressor_mode_t new_codec = VM_COMPRESSOR_DEFAULT_CODEC;
#if defined(__arm64__)
PE_parse_boot_argn("vm_compressor_codec", &new_codec, sizeof(new_codec));
assertf(((new_codec == VM_COMPRESSOR_DEFAULT_CODEC) || (new_codec == CMODE_WK) ||
- (new_codec == CMODE_LZ4) || (new_codec == CMODE_HYB)),
+ (new_codec == CMODE_LZ4) || (new_codec == CMODE_HYB)),
"Invalid VM compression codec: %u", new_codec);
-#if defined(__arm__)||defined(__arm64__)
+#if defined(__arm__) || defined(__arm64__)
uint32_t tmpc;
if (PE_parse_boot_argn("-vm_compressor_wk", &tmpc, sizeof(tmpc))) {
new_codec = VM_COMPRESSOR_DEFAULT_CODEC;