+ if (vm_compression_limit == 0) {
+ compressor_pool_size = ((kernel_map->max_offset - kernel_map->min_offset) - kernel_map->size) - VM_RESERVE_SIZE;
+ }
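+ /*
+ * A multiplier of 1 leaves the compressed-page limit directly
+ * proportional to the segment limit (see the math past try_again).
+ */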
+ compressor_pool_multiplier = 1;
+
+#elif defined(__arm64__) && defined(XNU_TARGET_OS_WATCH)
+
+ /*
+ * On M9 watches the compressor pool can grow large and cause churn
+ * in the working set, resulting in audio drops. Capping the
+ * compressor size favors reclaiming unused memory sitting in the
+ * idle band via jetsam.
+ */
+
+#define COMPRESSOR_CAP_PERCENTAGE 37ULL
+
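+ /* Never let the configured maximum exceed physical memory. */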
+ if (compressor_pool_max_size > max_mem) {
+ compressor_pool_max_size = max_mem;
+ }
+
+ if (vm_compression_limit == 0) {
+ compressor_pool_size = (max_mem * COMPRESSOR_CAP_PERCENTAGE) / 100ULL;
+ }
+ compressor_pool_multiplier = 1;
+
+#else
+
+ if (compressor_pool_max_size > max_mem) {
+ compressor_pool_max_size = max_mem;
+ }
+
+ if (vm_compression_limit == 0) {
+ compressor_pool_size = max_mem;
+ }
+ compressor_pool_multiplier = 1;
+#endif
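+ /* Regardless of how the pool was sized above, honor the configured cap. */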
+ if (compressor_pool_size > compressor_pool_max_size) {
+ compressor_pool_size = compressor_pool_max_size;
+ }
+
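+ /*
+ * Translate the pool size into segment limits. If the submap
+ * allocation below fails, the pool is halved and we retry from here.
+ */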
+try_again:
+ c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(C_SEG_ALLOCSIZE));
+ c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);
+
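+ /*
+ * Each segment can hold C_SEG_BUFSIZE bytes of compressed data,
+ * i.e. C_SEG_BUFSIZE / PAGE_SIZE compressed pages per segment.
+ */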
+ c_segment_pages_compressed_limit = (c_segments_limit * (C_SEG_BUFSIZE / PAGE_SIZE) * compressor_pool_multiplier);
+
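+ /*
+ * Unless an explicit limit was configured, allow at least enough
+ * compressed-page slots to cover all of physical memory.
+ */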
+ if (!vm_compression_limit && c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) {
+ c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
+ }
+
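+ /*
+ * Like c_segments_nearing_limit above, the 98% point lets the
+ * compressor flag that it is low on space before the hard limit.
+ */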
+ c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);
+
+#if CONFIG_FREEZE
+ /*
+ * Our in-core limits are based on the size of the compressor pool.
+ * The c_segments_nearing_limit is also based on the compressor pool
+ * size and calculated above.
+ */
+ c_segments_incore_limit = c_segments_limit;
+
+ if (freezer_incore_cseg_acct) {
+ /*
+ * Add enough segments to track all frozen c_segs that can be stored in swap.
+ */
+ c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(C_SEG_ALLOCSIZE));
+ }
+#endif
+ /*
+ * Submap needs space for:
+ * - c_segments
+ * - c_buffers
+ * - swap reclamations -- C_SEG_BUFSIZE
+ */
+ c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
+ c_buffers_size = vm_map_round_page(((vm_size_t)C_SEG_ALLOCSIZE * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
+
+ compressor_submap_size = c_segments_arr_size + c_buffers_size + C_SEG_BUFSIZE;
+
+#if RECORD_THE_COMPRESSED_DATA
+ c_compressed_record_sbuf_size = (vm_size_t)C_SEG_ALLOCSIZE + (PAGE_SIZE * 2);
+ compressor_submap_size += c_compressed_record_sbuf_size;
+#endif /* RECORD_THE_COMPRESSED_DATA */
+
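+ /*
+ * The compressor submap is permanent; its VA range is carved out of
+ * the kernel map once and never torn down.
+ */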
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_permanent = TRUE;
+ retval = kmem_suballoc(kernel_map, &start_addr, compressor_submap_size,
+ FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_COMPRESSOR,
+ &compressor_map);
+
+ if (retval != KERN_SUCCESS) {
+ if (++attempts > 3) {
+ panic("vm_compressor_init: kmem_suballoc failed - 0x%llx", (uint64_t)compressor_submap_size);
+ }
+
+ compressor_pool_size = compressor_pool_size / 2;
+
+ kprintf("retrying creation of the compressor submap at 0x%llx bytes\n", compressor_pool_size);
+ goto try_again;
+ }
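+ /*
+ * Reserve VA only (KMA_VAONLY) for the c_segments array; the backing
+ * pages are populated later as segments are actually created.
+ */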
+ if (kernel_memory_allocate(compressor_map, (vm_offset_t *)(&c_segments),
+ (sizeof(union c_segu) * c_segments_limit), 0,
+ KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) {