}
/* Reset gzalloc_data. */
- lock_zone(zone);
+ zone_lock(zone);
memcpy((void *)gzfc_copy, (void *)zone->gz.gzfc, gzfcsz);
bzero((void *)zone->gz.gzfc, gzfcsz);
zone->gz.gzfc_index = 0;
- unlock_zone(zone);
+ zone_unlock(zone);
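/* Note: the cache contents are copied aside and cleared while the
 * zone lock is held, so the stale elements can be released below
 * without holding the lock across the unmap/free operations.
 */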
/* Free up all the cached elements. */
for (uint32_t index = 0; index < gzfc_size; index++) {
*/
/* Decrement zone counters. */
- lock_zone(zone);
- zone->countfree += freed_elements;
- zone->page_count -= freed_elements;
- unlock_zone(zone);
+ zone_lock(zone);
+ zone->z_elems_free += freed_elements;
+ zone->z_wired_cur -= freed_elements;
+ zone_unlock(zone);
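/* The rename tracks the new zone accounting scheme: countfree
 * becomes z_elems_free (count of free elements) and page_count
 * becomes z_wired_cur, with gzalloc apparently charging one wired
 * page per guarded element.
 */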
kmem_free(kernel_map, gzfc_copy, gzfcsz);
}
vm_offset_t residue = rounded_size - zone_elem_size(zone);
vm_offset_t gzaddr = 0;
gzhdr_t *gzh, *gzhcopy = NULL;
+ bool new_va = false;
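/* new_va records whether this allocation obtained fresh virtual
 * address space; z_va_cur is only incremented on that path below.
 */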
if (!kmem_ready || (vm_page_zone == ZONE_NULL)) {
/* Early allocations are supplied directly from the
 * reserve.
 */
panic("gzalloc: kernel_memory_allocate for size 0x%llx failed with %d",
(uint64_t)rounded_size, kr);
}
+ new_va = true;
}
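/* new_va remains false for early-boot allocations satisfied from
 * the static gzalloc reserve, which consume no new VA.
 */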
if (gzalloc_uf_mode) {
gzaddr += PAGE_SIZE;
addr = gzaddr;
} else {
addr = (gzaddr + residue);
}
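/* The element's placement selects which guard page faults: in
 * underflow mode it begins just past a leading guard page, while
 * otherwise the residue pushes it against the trailing guard page
 * so overruns fault immediately.
 */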
- if (zone->zfree_clear_mem) {
+ if (zone->z_free_zeroes) {
bzero((void *)gzaddr, rounded_size);
} else {
/* Fill with a pattern on allocation to trap uninitialized
 * data use.
 */
*gzhcopy = *gzh;
}
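/* gzhcopy is only non-NULL in underflow mode, where the guard
 * header follows the element as a "footer" and a duplicate copy is
 * kept at the far end of the mapping.
 */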
- lock_zone(zone);
+ zone_lock(zone);
assert(zone->z_self == zone);
- zone->countfree--;
- zone->page_count += 1;
+ zone->z_elems_free--;
+ if (new_va) {
+ zone->z_va_cur += 1;
+ }
+ zone->z_wired_cur += 1;
zpercpu_get(zstats)->zs_mem_allocated += rounded_size;
-#if ZALLOC_DETAILED_STATS
- zpercpu_get(zstats)->zs_mem_wasted += rounded_size - zone_elem_size(zone);
-#endif /* ZALLOC_DETAILED_STATS */
- unlock_zone(zone);
+ zone_unlock(zone);
OSAddAtomic64((SInt32) rounded_size, &gzalloc_allocated);
OSAddAtomic64((SInt32) (rounded_size - zone_elem_size(zone)), &gzalloc_wasted);
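/* The rounding overhead is still tracked globally via the
 * gzalloc_allocated/gzalloc_wasted atomics, so the per-zone
 * ZALLOC_DETAILED_STATS wasted counter above is redundant here.
 */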
}
if (gzfc_size && gzalloc_dfree_check) {
- lock_zone(zone);
+ zone_lock(zone);
assert(zone->z_self == zone);
for (uint32_t gd = 0; gd < gzfc_size; gd++) {
if (zone->gz.gzfc[gd] != saddr) {
continue;
}
panic("%s: double free detected, freed address: 0x%lx, "
"current free cache index: %d, freed index: %d",
__func__, saddr, zone->gz.gzfc_index, gd);
}
- unlock_zone(zone);
+ zone_unlock(zone);
}
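/* Double-free check: scan the protected free element cache for the
 * address being freed; a match means the element is already in the
 * cache and is being freed a second time.
 */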
if (gzfc_size == 0) {
free_addr = saddr;
}
- lock_zone(zone);
+ zone_lock(zone);
assert(zone->z_self == zone);
/* Insert newly freed element into the protected free element
 * cache, and rotate out the LRU element.
 */
}
if (free_addr) {
- zone->countfree++;
- zone->page_count -= 1;
+ zone->z_elems_free++;
+ zone->z_wired_cur -= 1;
}
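/* free_addr is the LRU element rotated out of the protected cache
 * (or the freed element itself when no cache is configured); the
 * zone counters move only when an element actually leaves gzalloc.
 */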
zpercpu_get(zstats)->zs_mem_freed += rounded_size;
- unlock_zone(zone);
+ zone_unlock(zone);
if (free_addr) {
// TODO: consider using physical reads to check for