/*
- * Copyright (c) 2017 Apple Inc. All rights reserved.
+ * Copyright (c) 2017-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/cpu_data.h>
#include <mach/mach_host.h>
#include <vm/vm_kern.h>
+#include <kern/startup.h>
+#include <kern/zalloc_internal.h>
+/* Size of array in magazine determined by boot-arg or default */
+TUNABLE(uint16_t, magazine_element_count, "zcc_magazine_element_count", 8);
-#if defined(__i386__) || defined(__x86_64__)
-#include <i386/mp.h>
-#endif
-
-#if defined (__arm__) || defined (__arm64__)
-#include <arm/cpu_data_internal.h>
-#endif
+/* Size of depot lists determined by boot-arg or default */
+TUNABLE(uint16_t, depot_element_count, "zcc_depot_element_count", 8);
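+/*
+ * For example (illustrative values, not a tuning recommendation), both sizes
+ * can be raised from their defaults of 8 with boot-args such as:
+ *
+ *     zcc_magazine_element_count=16 zcc_depot_element_count=16
+ */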
-#define DEFAULT_MAGAZINE_SIZE 8 /* Default number of elements for all magazines allocated from the magazine_zone */
-#define DEFAULT_DEPOT_SIZE 8 /* Default number of elements for the array zcc_depot_list */
-#define ZCC_MAX_CPU_CACHE_LINE_SIZE 64 /* We should use a platform specific macro for this in the future, right now this is the max cache line size for all platforms*/
+SECURITY_READ_ONLY_LATE(zone_t) magazine_zone; /* zone to allocate zcc_magazine structs from */
+SECURITY_READ_ONLY_LATE(uintptr_t) zcache_canary; /* Canary used for the caching layer to prevent UaF attacks */
-lck_grp_t zcache_locks_grp; /* lock group for depot_lock */
-zone_t magazine_zone; /* zone to allocate zcc_magazine structs from */
-uint16_t magazine_element_count = 0; /* Size of array in magazine determined by boot-arg or default */
-uint16_t depot_element_count = 0; /* Size of depot lists determined by boot-arg or default */
-bool zone_cache_ready = FALSE; /* Flag to check if zone caching has been set up by zcache_bootstrap */
-uintptr_t zcache_canary = 0; /* Canary used for the caching layer to prevent UaF attacks */
-
-/* The zcc_magazine is used as a stack to store cached zone elements. These
+/*
+ * The zcc_magazine is used as a stack to store cached zone elements. These
* sets of elements can be moved around to perform bulk operations.
*/
struct zcc_magazine {
uint32_t zcc_magazine_index; /* Used as a stack pointer to access elements in the array */
uint32_t zcc_magazine_capacity; /* Number of pointers able to be stored in the zcc_elements array */
- void *zcc_elements[0]; /* Array of pointers to objects */
+ vm_offset_t zcc_elements[0]; /* Array of pointers to objects */
};
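+/*
+ * Illustrative sketch of the stack discipline (mirrors the inline push/pop in
+ * the alloc/free paths below): zcc_magazine_index counts the elements
+ * currently held, so a free pushes with
+ *     mag->zcc_elements[mag->zcc_magazine_index++] = addr;
+ * and an allocation pops with
+ *     addr = mag->zcc_elements[--mag->zcc_magazine_index];
+ */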
-/* Each CPU will use one of these to store its elements
+/*
+ * Each CPU will use one of these to store its elements
*/
struct zcc_per_cpu_cache {
- struct zcc_magazine *current; /* Magazine from which we will always try to allocate from and free to first */
- struct zcc_magazine *previous; /* Dedicated magazine for a quick reload and to prevent thrashing wen we swap with the depot */
-} __attribute__((aligned(ZCC_MAX_CPU_CACHE_LINE_SIZE))); /* we want to align this to a cache line size so it does not thrash when multiple cpus want to access their caches in paralell */
-
+ /* Magazine we always try to allocate from and free to first */
+ struct zcc_magazine *current;
+ /* Dedicated magazine for a quick reload and to prevent thrashing when we swap with the depot */
+ struct zcc_magazine *previous;
+ /* Zcache poisoning count */
+ uint32_t zp_count;
+#if ZALLOC_DETAILED_STATS
+ uint64_t zcc_allocs;
+ uint64_t zcc_frees;
+#endif /* ZALLOC_DETAILED_STATS */
+};
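+/*
+ * Sketch of how the two magazines interact (see the free path below): a free
+ * goes to `current` when it has room; if `current` is full but `previous`
+ * still has space, the two are swapped and the free retries, keeping a
+ * nearly-empty magazine on hand without a trip to the depot.
+ */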
-/*
- * The depot layer can be invalid while zone_gc() is draining it out.
- * During that time, the CPU caches are active. For CPU magazine allocs and
- * frees, the caching layer reaches directly into the zone allocator.
- */
-#define ZCACHE_DEPOT_INVALID -1
-#define zcache_depot_available(zcache) (zcache->zcc_depot_index != ZCACHE_DEPOT_INVALID)
/* This is the basic struct to take care of caching and is included within
* the zone.
*/
-struct zone_cache {
- lck_mtx_t zcc_depot_lock; /* Lock for the depot layer of caching */
- struct zcc_per_cpu_cache zcc_per_cpu_caches[MAX_CPUS]; /* An array of caches, one for each CPU */
- int zcc_depot_index; /* marks the point in the array where empty magazines begin */
- struct zcc_magazine *zcc_depot_list[0]; /* Stores full and empty magazines in the depot layer */
+struct zcc_depot {
+ /* marks the point in the array where empty magazines begin */
+ int zcc_depot_index;
+
+#if ZALLOC_DETAILED_STATS
+ uint64_t zcc_swap;
+ uint64_t zcc_fill;
+ uint64_t zcc_drain;
+ uint64_t zcc_fail;
+ uint64_t zcc_gc;
+#endif /* ZALLOC_DETAILED_STATS */
+
+ /* Stores full and empty magazines in the depot layer */
+ struct zcc_magazine *zcc_depot_list[0];
};
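+/*
+ * Worked example (numbers made up): with depot_element_count == 8 and
+ * zcc_depot_index == 3, zcc_depot_list[0..2] hold full magazines and
+ * zcc_depot_list[3..7] hold empty ones; a swap-for-alloc takes from the full
+ * side and a swap-for-free grows it.
+ */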
-
-void zcache_init_marked_zones(void);
-bool zcache_mag_fill(zone_t zone, struct zcc_magazine *mag);
-void zcache_mag_drain(zone_t zone, struct zcc_magazine *mag);
-void zcache_mag_init(struct zcc_magazine *mag, int count);
-void *zcache_mag_pop(struct zcc_magazine *mag);
-void zcache_mag_push(struct zcc_magazine *mag, void *elem);
-bool zcache_mag_has_space(struct zcc_magazine *mag);
-bool zcache_mag_has_elements(struct zcc_magazine *mag);
-void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b);
-void zcache_mag_depot_swap_for_alloc(struct zone_cache *depot, struct zcc_per_cpu_cache *cache);
-void zcache_mag_depot_swap_for_free(struct zone_cache *depot, struct zcc_per_cpu_cache *cache);
-void zcache_mag_depot_swap(struct zone_cache *depot, struct zcc_per_cpu_cache *cache, boolean_t load_full);
-void zcache_canary_add(zone_t zone, void *addr);
-void zcache_canary_validate(zone_t zone, void *addr);
+static bool zcache_mag_fill_locked(zone_t zone, struct zcc_magazine *mag);
+static void zcache_mag_drain_locked(zone_t zone, struct zcc_magazine *mag);
+static bool zcache_mag_has_space(struct zcc_magazine *mag);
+static bool zcache_mag_has_elements(struct zcc_magazine *mag);
+static void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b);
+static void zcache_mag_depot_swap_for_alloc(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache);
+static void zcache_mag_depot_swap_for_free(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache);
+static void zcache_canary_add(zone_t zone, vm_offset_t addr);
+#if ZALLOC_ENABLE_POISONING
+static void zcache_validate_element(zone_t zone, vm_offset_t *addr, bool poison);
+static void zcache_validate_and_clear_canary(zone_t zone, vm_offset_t *primary, vm_offset_t *backup);
+#endif
/*
* zcache_ready
*
- * Description: returns whether or not the zone caches are ready to use
+ * Returns whether or not the zone caches are ready to use
*
*/
-bool
+static bool
zcache_ready(void)
{
- return zone_cache_ready;
-}
-
-/*
- * zcache_init_marked_zones
- *
- * Description: Initializes all parts of the per-cpu caches for the list of
- * marked zones once we are able to initalize caches. This should
- * only be called once, and will be called during the time that the
- * system is single threaded so we don't have to take the lock.
- *
- */
-void
-zcache_init_marked_zones(void)
-{
- unsigned int i;
- for (i = 0; i < num_zones; i++) {
- if (zone_array[i].cpu_cache_enable_when_ready) {
- zcache_init(&zone_array[i]);
- zone_array[i].cpu_cache_enable_when_ready = FALSE;
- }
- }
+ return magazine_zone != NULL;
}
/*
* zcache_bootstrap
*
- * Description: initializes zone to allocate magazines from and sets
- * magazine_element_count and depot_element_count from
- * boot-args or default values
+ * Initializes zone to allocate magazines from and sets
+ * magazine_element_count and depot_element_count from
+ * boot-args or default values
*
*/
-void
+__startup_func
+static void
zcache_bootstrap(void)
{
- /* use boot-arg for custom magazine size*/
- if (!PE_parse_boot_argn("zcc_magazine_element_count", &magazine_element_count, sizeof(uint16_t))) {
- magazine_element_count = DEFAULT_MAGAZINE_SIZE;
- }
-
int magazine_size = sizeof(struct zcc_magazine) + magazine_element_count * sizeof(void *);
-
- magazine_zone = zinit(magazine_size, 100000 * magazine_size, magazine_size, "zcc_magazine_zone");
-
- assert(magazine_zone != NULL);
-
- /* use boot-arg for custom depot size*/
- if (!PE_parse_boot_argn("zcc_depot_element_count", &depot_element_count, sizeof(uint16_t))) {
- depot_element_count = DEFAULT_DEPOT_SIZE;
- }
-
- lck_grp_init(&zcache_locks_grp, "zcc_depot_lock", LCK_GRP_ATTR_NULL);
+ zone_t magzone;
/* Generate the canary value for zone caches */
zcache_canary = (uintptr_t) early_random();
- zone_cache_ready = TRUE;
+ magzone = zone_create("zcc_magazine_zone", magazine_size,
+ ZC_NOCACHING | ZC_ZFREE_CLEARMEM);
+
+ /*
+ * This causes zcache_ready() to return true.
+ */
+ os_atomic_store(&magazine_zone, magzone, compiler_acq_rel);
- zcache_init_marked_zones();
+ /*
+ * Now that we are initialized, we can enable zone caching for zones that
+ * were made before zcache_bootstrap() was called.
+ *
+ * The system is still single threaded so we don't need to take the lock.
+ */
+ zone_index_foreach(i) {
+ if (zone_array[i].cpu_cache_enabled) {
+ zcache_init(&zone_array[i]);
+ }
+ }
+}
+STARTUP(ZALLOC, STARTUP_RANK_FOURTH, zcache_bootstrap);
+
+static struct zcc_magazine *
+zcache_mag_alloc(void)
+{
+ struct zcc_magazine *mag = zalloc_flags(magazine_zone, Z_WAITOK);
+ mag->zcc_magazine_capacity = magazine_element_count;
+ return mag;
}
/*
* zcache_init
*
- * Description: Initializes all parts of the per-cpu caches for a given zone
+ * Initializes all parts of the per-cpu caches for a given zone
*
- * Parameters: zone pointer to zone on which to iniitalize caching
+ * Parameters:
+ * zone pointer to zone on which to initialize caching
*
*/
void
zcache_init(zone_t zone)
{
- int i; /* used as index in for loops */
- vm_size_t total_size; /* Used for allocating the zone_cache struct with the proper size of depot list */
- struct zone_cache *temp_cache; /* Temporary variable to initialize a zone_cache before assigning to the specified zone */
+ struct zcc_per_cpu_cache *pcpu_caches;
+ struct zcc_depot *depot;
+ vm_size_t size;
- /* Allocate chunk of memory for all structs */
- total_size = sizeof(struct zone_cache) + (depot_element_count * sizeof(void *));
+ /*
+ * If zcache hasn't been initialized yet, remember our decision.
+ *
+ * zcache_init() will be called again by zcache_bootstrap(),
+ * while the system is still single threaded, to build the missing caches.
+ */
+ if (!zcache_ready()) {
+ zone->cpu_cache_enabled = true;
+ return;
+ }
- temp_cache = (struct zone_cache *) kalloc(total_size);
+ /* Allocate chunk of memory for all structs */
+ size = sizeof(struct zcc_depot) + (depot_element_count * sizeof(void *));
+ depot = zalloc_permanent(size, ZALIGN_PTR);
+ size = sizeof(struct zcc_per_cpu_cache);
+ pcpu_caches = zalloc_percpu_permanent(size, ZALIGN_PTR);
/* Initialize a cache for every CPU */
- for (i = 0; i < MAX_CPUS; i++) {
- temp_cache->zcc_per_cpu_caches[i].current = (struct zcc_magazine *)zalloc(magazine_zone);
- temp_cache->zcc_per_cpu_caches[i].previous = (struct zcc_magazine *)zalloc(magazine_zone);
-
- assert(temp_cache->zcc_per_cpu_caches[i].current != NULL && temp_cache->zcc_per_cpu_caches[i].previous != NULL);
-
- zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].current, magazine_element_count);
- zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].previous, magazine_element_count);
+ zpercpu_foreach(cache, pcpu_caches) {
+ cache->current = zcache_mag_alloc();
+ cache->previous = zcache_mag_alloc();
+ cache->zp_count = zone_poison_count_init(zone);
}
- /* Initialize the lock on the depot layer */
- lck_mtx_init(&(temp_cache->zcc_depot_lock), &zcache_locks_grp, LCK_ATTR_NULL);
-
/* Initialize empty magazines in the depot list */
- for (i = 0; i < depot_element_count; i++) {
- temp_cache->zcc_depot_list[i] = (struct zcc_magazine *)zalloc(magazine_zone);
-
- assert(temp_cache->zcc_depot_list[i] != NULL);
+ for (int i = 0; i < depot_element_count; i++) {
+ depot->zcc_depot_list[i] = zcache_mag_alloc();
+ }
- zcache_mag_init(temp_cache->zcc_depot_list[i], magazine_element_count);
+ lock_zone(zone);
+ if (zone->zcache.zcc_depot) {
+ panic("allocating caches for zone %s twice", zone->z_name);
}
- temp_cache->zcc_depot_index = 0;
+ /* Make the initialization of the per-cpu magazines visible. */
+ os_atomic_thread_fence(release);
- lock_zone(zone);
- zone->zcache = temp_cache;
- /* Set flag to know caching is enabled */
- zone->cpu_cache_enabled = TRUE;
+ zone->zcache.zcc_depot = depot;
+ zone->zcache.zcc_pcpu = pcpu_caches;
+ zone->cpu_cache_enabled = true;
unlock_zone(zone);
- return;
}
/*
* zcache_drain_depot
*
- * Description: Frees all the full magazines from the depot layer to the zone allocator as part
- * of zone_gc(). The routine assumes that only one zone_gc() is in progress (zone_gc_lock
- * ensures that)
+ * Frees all the full magazines from the depot layer to the zone allocator as part
+ * of zone_gc(). The routine assumes that only one zone_gc() is in progress (zone_gc_lock
+ * ensures that)
*
- * Parameters: zone pointer to zone for which the depot layer needs to be drained
+ * Parameters:
+ * zone pointer to zone for which the depot layer needs to be drained
*
* Returns: None
*
void
zcache_drain_depot(zone_t zone)
{
- struct zone_cache *zcache = zone->zcache;
+ struct zcc_depot *depot;
int drain_depot_index = 0;
- /*
- * Grab the current depot list from the zone cache. If it has full magazines,
- * mark the depot as invalid and drain it.
- */
- lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
- if (!zcache_depot_available(zcache) || (zcache->zcc_depot_index == 0)) {
- /* no full magazines in the depot or depot unavailable; nothing to drain here */
- lck_mtx_unlock(&(zcache->zcc_depot_lock));
- return;
- }
- drain_depot_index = zcache->zcc_depot_index;
- /* Mark the depot as unavailable */
- zcache->zcc_depot_index = ZCACHE_DEPOT_INVALID;
- lck_mtx_unlock(&(zcache->zcc_depot_lock));
-
- /* Now drain the full magazines in the depot */
+ lock_zone(zone);
+ depot = zone->zcache.zcc_depot;
+ drain_depot_index = depot->zcc_depot_index;
for (int i = 0; i < drain_depot_index; i++) {
- zcache_mag_drain(zone, zcache->zcc_depot_list[i]);
+ zcache_mag_drain_locked(zone, depot->zcc_depot_list[i]);
}
+#if ZALLOC_DETAILED_STATS
+ depot->zcc_gc += drain_depot_index;
+#endif /* ZALLOC_DETAILED_STATS */
+ depot->zcc_depot_index = 0;
+ unlock_zone(zone);
+}
- lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
- /* Mark the depot as available again */
- zcache->zcc_depot_index = 0;
- lck_mtx_unlock(&(zcache->zcc_depot_lock));
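+/*
+ * zcache_free_to_cpu_cache_slow
+ *
+ * Depot-level fallback for the free path: under the zone lock, either rotate
+ * in an empty magazine from the depot or, when the depot holds nothing but
+ * full magazines, drain the current magazine back into the zone.
+ */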
+__attribute__((noinline))
+static void
+zcache_free_to_cpu_cache_slow(zone_t zone, struct zcc_per_cpu_cache *per_cpu_cache)
+{
+ struct zcc_depot *depot;
+
+ lock_zone(zone);
+ depot = zone->zcache.zcc_depot;
+ if (depot->zcc_depot_index < depot_element_count) {
+ /* If able, rotate in a new empty magazine from the depot and retry */
+ zcache_mag_depot_swap_for_free(depot, per_cpu_cache);
+ } else {
+ /* Free an entire magazine of elements */
+ zcache_mag_drain_locked(zone, per_cpu_cache->current);
+#if ZALLOC_DETAILED_STATS
+ depot->zcc_drain++;
+#endif /* ZALLOC_DETAILED_STATS */
+ }
+ unlock_zone(zone);
}
-/*
- * zcache_free_to_cpu_cache
- *
- * Description: Checks per-cpu caches to free element there if possible
- *
- * Parameters: zone pointer to zone for which element comes from
- * addr pointer to element to free
- *
- * Returns: TRUE if successfull, FALSE otherwise
- *
- * Precondition: check that caching is enabled for zone
- */
-bool
-zcache_free_to_cpu_cache(zone_t zone, void *addr)
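+/*
+ * zcache_free_to_cpu_cache
+ *
+ * Frees an element to the per-cpu caches: the element is cleared or poisoned
+ * (or given a canary) with preemption enabled, then pushed onto the current
+ * magazine, swapping in the previous magazine or falling back to the depot
+ * when the current one is full.
+ *
+ * Precondition: check that caching is enabled for zone
+ */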
+void
+zcache_free_to_cpu_cache(zone_t zone, zone_stats_t zstats, vm_offset_t addr)
{
- int curcpu; /* Current cpu is used to index into array of zcc_per_cpu_cache structs */
- struct zone_cache *zcache; /* local storage of the zone's cache */
- struct zcc_per_cpu_cache *per_cpu_cache; /* locally store the current per_cpu_cache */
+ struct zcc_per_cpu_cache *per_cpu_cache;
+ vm_offset_t elem = addr;
+ int cpu;
+
+ zone_allocated_element_validate(zone, elem);
+
+ /*
+ * This is racy but we don't need zp_count to be accurate.
+ * This allows us to do the poisoning with preemption enabled.
+ */
+ per_cpu_cache = zpercpu_get(zone->zcache.zcc_pcpu);
+ if (zfree_clear_or_poison(zone, &per_cpu_cache->zp_count, elem)) {
+ addr |= ZALLOC_ELEMENT_NEEDS_VALIDATION;
+ } else {
+ zcache_canary_add(zone, elem);
+ }
+
+#if KASAN_ZALLOC
+ kasan_poison_range(elem, zone_elem_size(zone), ASAN_HEAP_FREED);
+#endif
disable_preemption();
- curcpu = current_processor()->cpu_id;
- zcache = zone->zcache;
- per_cpu_cache = &zcache->zcc_per_cpu_caches[curcpu];
+ cpu = cpu_number();
+ per_cpu_cache = zpercpu_get_cpu(zone->zcache.zcc_pcpu, cpu);
if (zcache_mag_has_space(per_cpu_cache->current)) {
/* If able, free into current magazine */
- goto free_to_current;
} else if (zcache_mag_has_space(per_cpu_cache->previous)) {
/* If able, swap current and previous magazine and retry */
zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
- goto free_to_current;
} else {
- lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
- if (zcache_depot_available(zcache) && (zcache->zcc_depot_index < depot_element_count)) {
- /* If able, rotate in a new empty magazine from the depot and retry */
- zcache_mag_depot_swap_for_free(zcache, per_cpu_cache);
- lck_mtx_unlock(&(zcache->zcc_depot_lock));
- goto free_to_current;
- }
- lck_mtx_unlock(&(zcache->zcc_depot_lock));
- /* Attempt to free an entire magazine of elements */
- zcache_mag_drain(zone, per_cpu_cache->current);
- if (zcache_mag_has_space(per_cpu_cache->current)) {
- goto free_to_current;
- }
+ zcache_free_to_cpu_cache_slow(zone, per_cpu_cache);
}
- /* If not able to use cache return FALSE and fall through to zfree */
+ struct zcc_magazine *mag = per_cpu_cache->current;
+ mag->zcc_elements[mag->zcc_magazine_index++] = addr;
+ zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += zone_elem_size(zone);
+#if ZALLOC_DETAILED_STATS
+ per_cpu_cache->zcc_frees++;
+#endif /* ZALLOC_DETAILED_STATS */
+
enable_preemption();
- return FALSE;
+}
-free_to_current:
- assert(zcache_mag_has_space(per_cpu_cache->current));
- zcache_canary_add(zone, addr);
- zcache_mag_push(per_cpu_cache->current, addr);
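+/*
+ * zcache_alloc_from_cpu_cache_slow
+ *
+ * Depot-level fallback for the allocation path: under the zone lock, either
+ * rotate in a full magazine from the depot or refill the current magazine
+ * straight from the zone; returns false (with preemption re-enabled) when
+ * neither is possible so the caller can fall back to zalloc.
+ */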
+__attribute__((noinline))
+static bool
+zcache_alloc_from_cpu_cache_slow(zone_t zone, struct zcc_per_cpu_cache *per_cpu_cache)
+{
+ struct zcc_depot *depot;
-#if KASAN_ZALLOC
- kasan_poison_range((vm_offset_t)addr, zone->elem_size, ASAN_HEAP_FREED);
-#endif
+ lock_zone(zone);
+ depot = zone->zcache.zcc_depot;
+ if (depot->zcc_depot_index > 0) {
+ /* If able, rotate in a full magazine from the depot */
+ zcache_mag_depot_swap_for_alloc(depot, per_cpu_cache);
+ } else if (zcache_mag_fill_locked(zone, per_cpu_cache->current)) {
+#if ZALLOC_DETAILED_STATS
+ depot->zcc_fill++;
+#endif /* ZALLOC_DETAILED_STATS */
+ } else {
+#if ZALLOC_DETAILED_STATS
+ depot->zcc_fail++;
+#endif /* ZALLOC_DETAILED_STATS */
+ /* If unable to refill from the zone, fail so the caller falls through to zalloc */
+ unlock_zone(zone);
+ enable_preemption();
+ return false;
+ }
+ unlock_zone(zone);
- enable_preemption();
- return TRUE;
+ return true;
}
-
-/*
- * zcache_alloc_from_cpu_cache
- *
- * Description: Checks per-cpu caches to allocate element from there if possible
- *
- * Parameters: zone pointer to zone for which element will come from
- *
- * Returns: pointer to usable element
- *
- * Precondition: check that caching is enabled for zone
- */
vm_offset_t
-zcache_alloc_from_cpu_cache(zone_t zone)
+zcache_alloc_from_cpu_cache(zone_t zone, zone_stats_t zstats, vm_size_t waste)
{
- int curcpu; /* Current cpu is used to index into array of zcc_per_cpu_cache structs */
- void *ret = NULL; /* Points to the element which will be returned */
- struct zone_cache *zcache; /* local storage of the zone's cache */
- struct zcc_per_cpu_cache *per_cpu_cache; /* locally store the current per_cpu_cache */
+ struct zcc_per_cpu_cache *per_cpu_cache;
+ int cpu;
disable_preemption();
- curcpu = current_processor()->cpu_id;
- zcache = zone->zcache;
- per_cpu_cache = &zcache->zcc_per_cpu_caches[curcpu];
+ cpu = cpu_number();
+ per_cpu_cache = zpercpu_get_cpu(zone->zcache.zcc_pcpu, cpu);
if (zcache_mag_has_elements(per_cpu_cache->current)) {
/* If able, allocate from current magazine */
- goto allocate_from_current;
} else if (zcache_mag_has_elements(per_cpu_cache->previous)) {
/* If able, swap current and previous magazine and retry */
zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
- goto allocate_from_current;
- } else {
- lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
- if (zcache_depot_available(zcache) && (zcache->zcc_depot_index > 0)) {
- /* If able, rotate in a full magazine from the depot */
- zcache_mag_depot_swap_for_alloc(zcache, per_cpu_cache);
- lck_mtx_unlock(&(zcache->zcc_depot_lock));
- goto allocate_from_current;
- }
- lck_mtx_unlock(&(zcache->zcc_depot_lock));
- /* Attempt to allocate an entire magazine of elements */
- if (zcache_mag_fill(zone, per_cpu_cache->current)) {
- goto allocate_from_current;
- }
+ } else if (!zcache_alloc_from_cpu_cache_slow(zone, per_cpu_cache)) {
+ return (vm_offset_t)NULL;
+ }
+
+ struct zcc_magazine *mag = per_cpu_cache->current;
+ vm_offset_t elem_size = zone_elem_size(zone);
+ uint32_t index = --mag->zcc_magazine_index;
+ vm_offset_t addr = mag->zcc_elements[index];
+ mag->zcc_elements[index] = 0;
+ zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += elem_size;
+#if ZALLOC_DETAILED_STATS
+ if (waste) {
+ zpercpu_get_cpu(zstats, cpu)->zs_mem_wasted += waste;
}
+ per_cpu_cache->zcc_allocs++;
+#else
+ (void)waste;
+#endif /* ZALLOC_DETAILED_STATS */
- /* If unable to allocate from cache return NULL and fall through to zalloc */
enable_preemption();
- return (vm_offset_t) NULL;
-allocate_from_current:
- ret = zcache_mag_pop(per_cpu_cache->current);
- assert(ret != NULL);
- zcache_canary_validate(zone, ret);
+#if ZALLOC_ENABLE_POISONING
+ bool validate = addr & ZALLOC_ELEMENT_NEEDS_VALIDATION;
+#endif /* ZALLOC_ENABLE_POISONING */
+
+ addr &= ~ZALLOC_ELEMENT_NEEDS_VALIDATION;
#if KASAN_ZALLOC
- kasan_poison_range((vm_offset_t)ret, zone->elem_size, ASAN_VALID);
+ kasan_poison_range(addr, elem_size, ASAN_VALID);
#endif
+#if ZALLOC_ENABLE_POISONING
+ if (!validate) {
+ vm_offset_t backup = addr + elem_size - sizeof(vm_offset_t);
+ zcache_validate_and_clear_canary(zone, (vm_offset_t *)addr,
+ (vm_offset_t *)backup);
+ }
+ zalloc_validate_element(zone, addr, elem_size, validate);
+#endif /* ZALLOC_ENABLE_POISONING */
- enable_preemption();
- return (vm_offset_t) ret;
-}
-
-
-/*
- * zcache_mag_init
- *
- * Description: initializes fields in a zcc_magazine struct
- *
- * Parameters: mag pointer to magazine to initialize
- *
- */
-void
-zcache_mag_init(struct zcc_magazine *mag, int count)
-{
- mag->zcc_magazine_index = 0;
- mag->zcc_magazine_capacity = count;
+ return addr;
}
/*
- * zcache_mag_fill
+ * zcache_mag_fill_locked
*
- * Description: fills a magazine with as many elements as the zone can give
- * without blocking to carve out more memory
+ * Fills a magazine with as many elements as the zone can give
+ * without blocking to carve out more memory
*
- * Parameters: zone zone from which to allocate
- * mag pointer to magazine to fill
+ * Parameters:
+ * zone zone from which to allocate
+ * mag pointer to magazine to fill
*
* Return: true if able to allocate elements, false if mag is still empty
*/
-bool
-zcache_mag_fill(zone_t zone, struct zcc_magazine *mag)
+static bool
+zcache_mag_fill_locked(zone_t zone, struct zcc_magazine *mag)
{
- assert(mag->zcc_magazine_index == 0);
- void* elem = NULL;
- uint32_t i;
- lock_zone(zone);
- for (i = mag->zcc_magazine_index; i < mag->zcc_magazine_capacity; i++) {
- elem = zalloc_attempt(zone);
- if (elem) {
+ uint32_t i = mag->zcc_magazine_index;
+ uint32_t end = mag->zcc_magazine_capacity;
+ vm_offset_t elem, addr;
+
+ while (i < end && zone->countfree) {
+ addr = zalloc_direct_locked(zone, Z_NOWAIT, 0);
+ elem = addr & ~ZALLOC_ELEMENT_NEEDS_VALIDATION;
+ if (addr & ZALLOC_ELEMENT_NEEDS_VALIDATION) {
+ zone_clear_freelist_pointers(zone, elem);
+ } else {
zcache_canary_add(zone, elem);
- zcache_mag_push(mag, elem);
+ }
#if KASAN_ZALLOC
- kasan_poison_range((vm_offset_t)elem, zone->elem_size, ASAN_HEAP_FREED);
+ kasan_poison_range(elem, zone_elem_size(zone), ASAN_HEAP_FREED);
#endif
- } else {
- break;
- }
- }
- unlock_zone(zone);
- if (i == 0) {
- return FALSE;
+ mag->zcc_elements[i++] = addr;
}
- return TRUE;
-}
-/*
- * zcache_mag_drain
- *
- * Description: frees all elements in a magazine
- *
- * Parameters: zone zone to which elements will be freed
- * mag pointer to magazine to empty
- *
- */
-void
-zcache_mag_drain(zone_t zone, struct zcc_magazine *mag)
-{
- assert(mag->zcc_magazine_index == mag->zcc_magazine_capacity);
- lock_zone(zone);
- while (mag->zcc_magazine_index > 0) {
- uint32_t index = --mag->zcc_magazine_index;
- zcache_canary_validate(zone, mag->zcc_elements[index]);
- zfree_direct(zone, (vm_offset_t)mag->zcc_elements[index]);
- mag->zcc_elements[mag->zcc_magazine_index] = 0;
- }
- unlock_zone(zone);
-}
+ mag->zcc_magazine_index = i;
-/*
- * zcache_mag_pop
- *
- * Description: removes last element from magazine in a stack pop fashion
- * zcc_magazine_index represents the number of elements on the
- * stack, so it the index of where to save the next element, when
- * full, it will be 1 past the last index of the array
- *
- * Parameters: mag pointer to magazine from which to remove element
- *
- * Returns: pointer to element removed from magazine
- *
- * Precondition: must check that magazine is not empty before calling
- */
-void *
-zcache_mag_pop(struct zcc_magazine *mag)
-{
- void *elem;
- assert(zcache_mag_has_elements(mag));
- elem = mag->zcc_elements[--mag->zcc_magazine_index];
- /* Ensure pointer to element cannot be accessed after we pop it */
- mag->zcc_elements[mag->zcc_magazine_index] = NULL;
- assert(elem != NULL);
- return elem;
+ return i != 0;
}
-
/*
- * zcache_mag_push
+ * zcache_mag_drain_locked
*
- * Description: adds element to magazine and increments zcc_magazine_index
- * zcc_magazine_index represents the number of elements on the
- * stack, so it the index of where to save the next element, when
- * full, it will be 1 past the last index of the array
+ * Frees all elements in a magazine
*
- * Parameters: mag pointer to magazine from which to remove element
- * elem pointer to element to add
+ * Parameters:
+ * zone zone to which elements will be freed
+ * mag pointer to magazine to empty
*
- * Precondition: must check that magazine is not full before calling
*/
-void
-zcache_mag_push(struct zcc_magazine *mag, void *elem)
+static void
+zcache_mag_drain_locked(zone_t zone, struct zcc_magazine *mag)
{
- assert(zcache_mag_has_space(mag));
- mag->zcc_elements[mag->zcc_magazine_index++] = elem;
+ vm_offset_t elem, addr;
+ bool poison;
+
+ for (uint32_t i = 0, end = mag->zcc_magazine_index; i < end; i++) {
+ addr = mag->zcc_elements[i];
+ poison = addr & ZALLOC_ELEMENT_NEEDS_VALIDATION;
+ elem = addr & ~ZALLOC_ELEMENT_NEEDS_VALIDATION;
+
+#if ZALLOC_ENABLE_POISONING
+ zcache_validate_element(zone, (vm_offset_t *)elem, poison);
+#endif /* ZALLOC_ENABLE_POISONING */
+ zfree_direct_locked(zone, elem, poison);
+ mag->zcc_elements[i] = 0;
+ }
+ mag->zcc_magazine_index = 0;
}
/*
* zcache_mag_has_space
*
- * Description: checks if magazine still has capacity
+ * Checks if magazine still has capacity
*
- * Parameters: mag pointer to magazine to check
+ * Parameters:
+ * mag pointer to magazine to check
*
* Returns: true if the magazine still has room for more elements
*
*/
-bool
+static bool
zcache_mag_has_space(struct zcc_magazine *mag)
{
return mag->zcc_magazine_index < mag->zcc_magazine_capacity;
/*
* zcache_mag_has_elements
*
- * Description: checks if magazine is empty
+ * Checks if the magazine holds any elements
*
- * Parameters: mag pointer to magazine to check
+ * Parameters:
+ * mag pointer to magazine to check
*
* Returns: true if the magazine has elements
*
*/
-bool
+static bool
zcache_mag_has_elements(struct zcc_magazine *mag)
{
return mag->zcc_magazine_index > 0;
/*
* zcache_swap_magazines
*
- * Description: Function which swaps two pointers of any type
+ * Swaps two magazine pointers
*
- * Parameters: a pointer to first pointer
- * b pointer to second pointer
+ * Parameters:
+ * a pointer to first pointer
+ * b pointer to second pointer
*/
-void
+static void
zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b)
{
struct zcc_magazine *temp = *a;
/*
* zcache_mag_depot_swap_for_alloc
*
- * Description: Swaps a full magazine into the current position
+ * Swaps a full magazine into the current position
*
- * Parameters: zcache pointer to the zone_cache to access the depot
- * cache pointer to the current per-cpu cache
+ * Parameters:
+ * depot pointer to the depot
+ * cache pointer to the current per-cpu cache
*
* Precondition: Check that the depot list has at least one full magazine
*/
-void
-zcache_mag_depot_swap_for_alloc(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache)
+static void
+zcache_mag_depot_swap_for_alloc(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache)
{
/* Loads a full magazine from which we can allocate */
- assert(zcache_depot_available(zcache));
- assert(zcache->zcc_depot_index > 0);
- zcache->zcc_depot_index--;
- zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]);
+ assert(depot->zcc_depot_index > 0);
+ depot->zcc_depot_index--;
+#if ZALLOC_DETAILED_STATS
+ depot->zcc_swap++;
+#endif /* ZALLOC_DETAILED_STATS */
+ zcache_swap_magazines(&cache->current, &depot->zcc_depot_list[depot->zcc_depot_index]);
}
/*
* zcache_mag_depot_swap_for_free
*
- * Description: Swaps an empty magazine into the current position
+ * Swaps an empty magazine into the current position
*
- * Parameters: zcache pointer to the zone_cache to access the depot
- * cache pointer to the current per-cpu cache
+ * Parameters:
+ * depot pointer to the depot
+ * cache pointer to the current per-cpu cache
*
* Precondition: Check that the depot list has at least one empty magazine
*/
-void
-zcache_mag_depot_swap_for_free(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache)
+static void
+zcache_mag_depot_swap_for_free(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache)
{
/* Loads an empty magazine into which we can free */
- assert(zcache_depot_available(zcache));
- assert(zcache->zcc_depot_index < depot_element_count);
- zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]);
- zcache->zcc_depot_index++;
+ assert(depot->zcc_depot_index < depot_element_count);
+ zcache_swap_magazines(&cache->current, &depot->zcc_depot_list[depot->zcc_depot_index]);
+#if ZALLOC_DETAILED_STATS
+ depot->zcc_swap++;
+#endif /* ZALLOC_DETAILED_STATS */
+ depot->zcc_depot_index++;
}
/*
* zcache_canary_add
*
- * Description: Adds a canary to an element by putting zcache_canary at the first
- * and last location of the element
- *
- * Parameters: zone zone for the element
- * addr element address to add canary to
+ * Adds a canary to an element by storing (zcache_canary ^ element address)
+ * in the first and last pointer-sized words of the element
*
+ * Parameters:
+ * zone zone for the element
+ * addr element address to add canary to
*/
-void
-zcache_canary_add(zone_t zone, void *element)
+static void
+zcache_canary_add(zone_t zone, vm_offset_t element)
{
+#if ZALLOC_ENABLE_POISONING
vm_offset_t *primary = (vm_offset_t *)element;
- vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t));
+ vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary +
+ zone_elem_size(zone) - sizeof(vm_offset_t));
*primary = *backup = (zcache_canary ^ (uintptr_t)element);
+#else
+#pragma unused(zone, element)
+#endif
+}
+
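+/*
+ * Worked example of the canary scheme (values made up): if zcache_canary is
+ * 0x1234 and a freed element sits at address 0xff00, both its first and last
+ * pointer-sized words are set to 0x1234 ^ 0xff00 == 0xed34.  Validation XORs
+ * the stored word with the element address again and expects to recover
+ * zcache_canary; anything else means the element was modified while cached.
+ */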
+#if ZALLOC_ENABLE_POISONING
+__abortlike static void
+zcache_validation_panic(zone_t zone, vm_offset_t *primary, vm_offset_t *backup,
+ vm_offset_t permutation)
+{
+ vm_offset_t primary_value = 0;
+ vm_offset_t backup_value = 0;
+
+ if (permutation == zcache_canary) {
+ primary_value = *primary ^ (vm_offset_t)primary;
+ backup_value = *backup ^ (vm_offset_t)primary;
+ permutation = permutation ^ (vm_offset_t)primary;
+ } else {
+ primary_value = *primary;
+ backup_value = *backup;
+ }
+ if (primary_value != permutation) {
+ panic("Zone cache element was used after free! Element %p was corrupted at "
+ "beginning; Expected 0x%lx but found 0x%lx; canary 0x%lx; zone %p (%s%s)",
+ primary, (uintptr_t) permutation, (uintptr_t) *primary, zcache_canary, zone,
+ zone_heap_name(zone), zone->z_name);
+ } else {
+ panic("Zone cache element was used after free! Element %p was corrupted at end; "
+ "Expected 0x%lx but found 0x%lx; canary 0x%lx; zone %p (%s%s)",
+ primary, (uintptr_t) permutation, (uintptr_t) *backup, zcache_canary, zone,
+ zone_heap_name(zone), zone->z_name);
+ }
}
/*
- * zcache_canary_validate
+ * zcache_validate_and_clear_canary
+ *
+ * Validates an element of the zone cache to make sure it still contains the zone
+ * caching canary, and clears the canary.
*
- * Description: Validates an element of the zone cache to make sure it still contains the zone
- * caching canary.
+ * Parameters:
+ * zone zone for the element
+ * primary addr of canary placed in front
+ * backup addr of canary placed at the back
+ */
+static void
+zcache_validate_and_clear_canary(zone_t zone, vm_offset_t *primary, vm_offset_t *backup)
+{
+ vm_offset_t primary_value = (*primary ^ (uintptr_t)primary);
+ vm_offset_t backup_value = (*backup ^ (uintptr_t)primary);
+
+ if (primary_value == zcache_canary && backup_value == zcache_canary) {
+ *primary = *backup = ZONE_POISON;
+ } else {
+ zcache_validation_panic(zone, primary, backup, zcache_canary);
+ }
+}
+
+/*
+ * zcache_validate_element
*
- * Parameters: zone zone for the element
- * addr element address to validate
+ * Validates the first and last pointer-sized words of the element to ensure
+ * that they haven't been altered. This function is used when an
+ * element moves from the cache back to the zone, therefore it only validates
+ * the first and last pointer-sized words (the location of future freelist pointers).
*
+ * Parameters:
+ * zone zone for the element
+ * element addr of element to validate
+ * poison has the element been poisoned
*/
-void
-zcache_canary_validate(zone_t zone, void *element)
+static void
+zcache_validate_element(zone_t zone, vm_offset_t *element, bool poison)
{
vm_offset_t *primary = (vm_offset_t *)element;
- vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t));
+ vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary +
+ zone_elem_size(zone) - sizeof(vm_offset_t));
- vm_offset_t primary_value = (*primary ^ (uintptr_t)element);
- if (primary_value != zcache_canary) {
- panic("Zone cache element was used after free! Element %p was corrupted at beginning; Expected %p but found %p; canary %p; zone %p (%s)",
- element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*primary), (void *)zcache_canary, zone, zone->zone_name);
+ if (zone->zfree_clear_mem) {
+ if (*primary == 0 && *backup == 0) {
+ return;
+ } else {
+ zcache_validation_panic(zone, primary, backup, 0);
+ }
}
- vm_offset_t backup_value = (*backup ^ (uintptr_t)element);
- if (backup_value != zcache_canary) {
- panic("Zone cache element was used after free! Element %p was corrupted at end; Expected %p but found %p; canary %p; zone %p (%s)",
- element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*backup), (void *)zcache_canary, zone, zone->zone_name);
+ if (__probable(!poison)) {
+ zcache_validate_and_clear_canary(zone, primary, backup);
+ } else {
+ if (*primary == ZONE_POISON && *backup == ZONE_POISON) {
+ return;
+ } else {
+ zcache_validation_panic(zone, primary, backup, ZONE_POISON);
+ }
}
}
+#endif /* ZALLOC_ENABLE_POISONING */