/* osfmk/kern/zcache.c (apple/xnu, xnu-7195.60.75) */

/*
 * Copyright (c) 2017-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <mach/mach_host.h>
#include <vm/vm_kern.h>
#include <kern/startup.h>
#include <kern/zalloc_internal.h>

/* Size of array in magazine determined by boot-arg or default */
TUNABLE(uint16_t, magazine_element_count, "zcc_magazine_element_count", 8);

/* Size of depot lists determined by boot-arg or default */
TUNABLE(uint16_t, depot_element_count, "zcc_depot_element_count", 8);

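/*
 * Example (illustrative values): both sizes can be overridden at boot with
 * the boot-args named above, e.g.
 *
 *     zcc_magazine_element_count=16 zcc_depot_element_count=16
 *
 * otherwise each magazine holds 8 elements and each depot holds 8 magazines.
 */
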
SECURITY_READ_ONLY_LATE(zone_t) magazine_zone;     /* zone to allocate zcc_magazine structs from */
SECURITY_READ_ONLY_LATE(uintptr_t) zcache_canary;  /* Canary used for the caching layer to prevent UaF attacks */

/*
 * The zcc_magazine is used as a stack to store cached zone elements. These
 * sets of elements can be moved around to perform bulk operations.
 */
struct zcc_magazine {
	uint32_t zcc_magazine_index;        /* Used as a stack pointer to access elements in the array */
	uint32_t zcc_magazine_capacity;     /* Number of pointers able to be stored in the zcc_elements array */
	vm_offset_t zcc_elements[0];        /* Array of pointers to objects */
};
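
/*
 * Illustrative sketch of the LIFO discipline the code below maintains on a
 * magazine (zcc_magazine_index always stays in [0, zcc_magazine_capacity]):
 *
 *     push: mag->zcc_elements[mag->zcc_magazine_index++] = addr;
 *     pop:  addr = mag->zcc_elements[--mag->zcc_magazine_index];
 */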
/*
 * Each CPU will use one of these to store its elements
 */
struct zcc_per_cpu_cache {
	/* Magazine which we will always try to allocate from and free to first */
	struct zcc_magazine *current;
	/* Dedicated magazine for a quick reload and to prevent thrashing when we swap with the depot */
	struct zcc_magazine *previous;
	/* Zcache poisoning count */
	uint32_t zp_count;
#if ZALLOC_DETAILED_STATS
	uint64_t zcc_allocs;
	uint64_t zcc_frees;
#endif /* ZALLOC_DETAILED_STATS */
};


/* This is the basic struct to take care of caching and is included within
 * the zone.
 */
struct zcc_depot {
	/* marks the point in the array where empty magazines begin */
	int zcc_depot_index;

#if ZALLOC_DETAILED_STATS
	uint64_t zcc_swap;
	uint64_t zcc_fill;
	uint64_t zcc_drain;
	uint64_t zcc_fail;
	uint64_t zcc_gc;
#endif /* ZALLOC_DETAILED_STATS */

	/* Stores full and empty magazines in the depot layer */
	struct zcc_magazine *zcc_depot_list[0];
};

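/*
 * Summary of the layering implemented below: allocations and frees first try
 * the per-CPU `current` magazine, then `previous`, and only then take the
 * zone lock to exchange magazines with the depot, where full magazines sit at
 * indices [0, zcc_depot_index) and empty ones at
 * [zcc_depot_index, depot_element_count). When the depot cannot help, the
 * slow paths fall back to zalloc_direct_locked() / zfree_direct_locked().
 */
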
static bool zcache_mag_fill_locked(zone_t zone, struct zcc_magazine *mag);
static void zcache_mag_drain_locked(zone_t zone, struct zcc_magazine *mag);
static bool zcache_mag_has_space(struct zcc_magazine *mag);
static bool zcache_mag_has_elements(struct zcc_magazine *mag);
static void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b);
static void zcache_mag_depot_swap_for_alloc(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache);
static void zcache_mag_depot_swap_for_free(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache);
static void zcache_canary_add(zone_t zone, vm_offset_t addr);
#if ZALLOC_ENABLE_POISONING
static void zcache_validate_element(zone_t zone, vm_offset_t *addr, bool poison);
static void zcache_validate_and_clear_canary(zone_t zone, vm_offset_t *primary, vm_offset_t *backup);
#endif

/*
 * zcache_ready
 *
 * Returns whether or not the zone caches are ready to use
 *
 */
static bool
zcache_ready(void)
{
	return magazine_zone != NULL;
}

/*
 * zcache_bootstrap
 *
 * Initializes zone to allocate magazines from and sets
 * magazine_element_count and depot_element_count from
 * boot-args or default values
 *
 */
__startup_func
static void
zcache_bootstrap(void)
{
	int magazine_size = sizeof(struct zcc_magazine) + magazine_element_count * sizeof(void *);
	zone_t magzone;

	/* Generate the canary value for zone caches */
	zcache_canary = (uintptr_t) early_random();

	magzone = zone_create("zcc_magazine_zone", magazine_size,
	    ZC_NOCACHING | ZC_ZFREE_CLEARMEM);

	/*
	 * This causes zcache_ready() to return true.
	 */
	os_atomic_store(&magazine_zone, magzone, compiler_acq_rel);

	/*
	 * Now that we are initialized, we can enable zone caching for zones that
	 * were made before zcache_bootstrap() was called.
	 *
	 * The system is still single threaded so we don't need to take the lock.
	 */
	zone_index_foreach(i) {
		if (zone_array[i].cpu_cache_enabled) {
			zcache_init(&zone_array[i]);
		}
	}
}
STARTUP(ZALLOC, STARTUP_RANK_FOURTH, zcache_bootstrap);

static struct zcc_magazine *
zcache_mag_alloc(void)
{
	struct zcc_magazine *mag = zalloc_flags(magazine_zone, Z_WAITOK);
	mag->zcc_magazine_capacity = magazine_element_count;
	return mag;
}


/*
 * zcache_init
 *
 * Initializes all parts of the per-cpu caches for a given zone
 *
 * Parameters:
 *	zone	pointer to zone on which to initialize caching
 *
 */
void
zcache_init(zone_t zone)
{
	struct zcc_per_cpu_cache *pcpu_caches;
	struct zcc_depot *depot;
	vm_size_t size;

	/*
	 * If zcache hasn't been initialized yet, remember our decision.
	 *
	 * zcache_init() will be called again by zcache_bootstrap(),
	 * while the system is still single threaded, to build the missing caches.
	 */
	if (!zcache_ready()) {
		zone->cpu_cache_enabled = true;
		return;
	}

	/* Allocate chunk of memory for all structs */
	size = sizeof(struct zcc_depot) + (depot_element_count * sizeof(void *));
	depot = zalloc_permanent(size, ZALIGN_PTR);

	size = sizeof(struct zcc_per_cpu_cache);
	pcpu_caches = zalloc_percpu_permanent(size, ZALIGN_PTR);

	/* Initialize a cache for every CPU */
	zpercpu_foreach(cache, pcpu_caches) {
		cache->current = zcache_mag_alloc();
		cache->previous = zcache_mag_alloc();
		cache->zp_count = zone_poison_count_init(zone);
	}

	/* Initialize empty magazines in the depot list */
	for (int i = 0; i < depot_element_count; i++) {
		depot->zcc_depot_list[i] = zcache_mag_alloc();
	}

	lock_zone(zone);
	if (zone->zcache.zcc_depot) {
		panic("allocating caches for zone %s twice", zone->z_name);
	}

	/* Make the initialization of the per-cpu magazines visible. */
	os_atomic_thread_fence(release);

	zone->zcache.zcc_depot = depot;
	zone->zcache.zcc_pcpu = pcpu_caches;
	zone->cpu_cache_enabled = true;
	unlock_zone(zone);
}
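
/*
 * Usage sketch (illustrative): a zone opts into caching either by calling
 * zcache_init() on itself once the magazine zone exists, or by having its
 * caching flag set before zcache_bootstrap() runs (for instance via a
 * zone_create() caching option such as ZC_CACHING; the exact flag is assumed
 * here). In the early case only cpu_cache_enabled is recorded, and
 * zcache_bootstrap() later calls zcache_init() again to build the per-CPU
 * caches and the depot.
 */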

/*
 * zcache_drain_depot
 *
 * Frees all the full magazines from the depot layer to the zone allocator as part
 * of zone_gc(). The routine assumes that only one zone_gc() is in progress (zone_gc_lock
 * ensures that)
 *
 * Parameters:
 *	zone	pointer to zone for which the depot layer needs to be drained
 *
 * Returns: None
 *
 */
void
zcache_drain_depot(zone_t zone)
{
	struct zcc_depot *depot;
	int drain_depot_index = 0;

	lock_zone(zone);
	depot = zone->zcache.zcc_depot;
	drain_depot_index = depot->zcc_depot_index;
	for (int i = 0; i < drain_depot_index; i++) {
		zcache_mag_drain_locked(zone, depot->zcc_depot_list[i]);
	}
#if ZALLOC_DETAILED_STATS
	depot->zcc_gc += drain_depot_index;
#endif /* ZALLOC_DETAILED_STATS */
	depot->zcc_depot_index = 0;
	unlock_zone(zone);
}

__attribute__((noinline))
static void
zcache_free_to_cpu_cache_slow(zone_t zone, struct zcc_per_cpu_cache *per_cpu_cache)
{
	struct zcc_depot *depot;

	lock_zone(zone);
	depot = zone->zcache.zcc_depot;
	if (depot->zcc_depot_index < depot_element_count) {
		/* If able, rotate in a new empty magazine from the depot and retry */
		zcache_mag_depot_swap_for_free(depot, per_cpu_cache);
	} else {
		/* Free an entire magazine of elements */
		zcache_mag_drain_locked(zone, per_cpu_cache->current);
#if ZALLOC_DETAILED_STATS
		depot->zcc_drain++;
#endif /* ZALLOC_DETAILED_STATS */
	}
	unlock_zone(zone);
}


void
zcache_free_to_cpu_cache(zone_t zone, zone_stats_t zstats, vm_offset_t addr)
{
	struct zcc_per_cpu_cache *per_cpu_cache;
	vm_offset_t elem = addr;
	int cpu;

	zone_allocated_element_validate(zone, elem);

	/*
	 * This is racy but we don't need zp_count to be accurate.
	 * This allows us to do the poisoning with preemption enabled.
	 */
	per_cpu_cache = zpercpu_get(zone->zcache.zcc_pcpu);
	if (zfree_clear_or_poison(zone, &per_cpu_cache->zp_count, elem)) {
		addr |= ZALLOC_ELEMENT_NEEDS_VALIDATION;
	} else {
		zcache_canary_add(zone, elem);
	}

#if KASAN_ZALLOC
	kasan_poison_range(elem, zone_elem_size(zone), ASAN_HEAP_FREED);
#endif

	disable_preemption();
	cpu = cpu_number();
	per_cpu_cache = zpercpu_get_cpu(zone->zcache.zcc_pcpu, cpu);

	if (zcache_mag_has_space(per_cpu_cache->current)) {
		/* If able, free into current magazine */
	} else if (zcache_mag_has_space(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
	} else {
		zcache_free_to_cpu_cache_slow(zone, per_cpu_cache);
	}

	struct zcc_magazine *mag = per_cpu_cache->current;
	mag->zcc_elements[mag->zcc_magazine_index++] = addr;
	zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += zone_elem_size(zone);
#if ZALLOC_DETAILED_STATS
	per_cpu_cache->zcc_frees++;
#endif /* ZALLOC_DETAILED_STATS */

	enable_preemption();
}
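
/*
 * Note on the tagging scheme used above and below (summary, for illustration):
 * when zfree_clear_or_poison() cleared or poisoned the element, the cached
 * pointer is stored with ZALLOC_ELEMENT_NEEDS_VALIDATION OR'd in; otherwise a
 * canary is written into the element instead. The allocation and drain paths
 * strip the flag with `addr & ~ZALLOC_ELEMENT_NEEDS_VALIDATION` and use it to
 * decide whether to check the poison/clear pattern or the canary.
 */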

__attribute__((noinline))
static bool
zcache_alloc_from_cpu_cache_slow(zone_t zone, struct zcc_per_cpu_cache *per_cpu_cache)
{
	struct zcc_depot *depot;

	lock_zone(zone);
	depot = zone->zcache.zcc_depot;
	if (depot->zcc_depot_index > 0) {
		/* If able, rotate in a full magazine from the depot */
		zcache_mag_depot_swap_for_alloc(depot, per_cpu_cache);
	} else if (zcache_mag_fill_locked(zone, per_cpu_cache->current)) {
#if ZALLOC_DETAILED_STATS
		depot->zcc_fill++;
#endif /* ZALLOC_DETAILED_STATS */
	} else {
#if ZALLOC_DETAILED_STATS
		depot->zcc_fail++;
#endif /* ZALLOC_DETAILED_STATS */
		/* If unable to allocate from cache return NULL and fall through to zalloc */
		unlock_zone(zone);
		enable_preemption();
		return false;
	}
	unlock_zone(zone);

	return true;
}

vm_offset_t
zcache_alloc_from_cpu_cache(zone_t zone, zone_stats_t zstats, vm_size_t waste)
{
	struct zcc_per_cpu_cache *per_cpu_cache;
	int cpu;

	disable_preemption();
	cpu = cpu_number();
	per_cpu_cache = zpercpu_get_cpu(zone->zcache.zcc_pcpu, cpu);

	if (zcache_mag_has_elements(per_cpu_cache->current)) {
		/* If able, allocate from current magazine */
	} else if (zcache_mag_has_elements(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
	} else if (!zcache_alloc_from_cpu_cache_slow(zone, per_cpu_cache)) {
		return (vm_offset_t)NULL;
	}

	struct zcc_magazine *mag = per_cpu_cache->current;
	vm_offset_t elem_size = zone_elem_size(zone);
	uint32_t index = --mag->zcc_magazine_index;
	vm_offset_t addr = mag->zcc_elements[index];
	mag->zcc_elements[index] = 0;
	zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += elem_size;
#if ZALLOC_DETAILED_STATS
	if (waste) {
		zpercpu_get_cpu(zstats, cpu)->zs_mem_wasted += waste;
	}
	per_cpu_cache->zcc_allocs++;
#else
	(void)waste;
#endif /* ZALLOC_DETAILED_STATS */

	enable_preemption();

#if ZALLOC_ENABLE_POISONING
	bool validate = addr & ZALLOC_ELEMENT_NEEDS_VALIDATION;
#endif /* ZALLOC_ENABLE_POISONING */

	addr &= ~ZALLOC_ELEMENT_NEEDS_VALIDATION;

#if KASAN_ZALLOC
	kasan_poison_range(addr, elem_size, ASAN_VALID);
#endif
#if ZALLOC_ENABLE_POISONING
	if (!validate) {
		vm_offset_t backup = addr + elem_size - sizeof(vm_offset_t);
		zcache_validate_and_clear_canary(zone, (vm_offset_t *)addr,
		    (vm_offset_t *)backup);
	}
	zalloc_validate_element(zone, addr, elem_size, validate);
#endif /* ZALLOC_ENABLE_POISONING */

	return addr;
}


/*
 * zcache_mag_fill_locked
 *
 * Fills a magazine with as many elements as the zone can give
 * without blocking to carve out more memory
 *
 * Parameters:
 *	zone	zone from which to allocate
 *	mag	pointer to magazine to fill
 *
 * Return: True if able to allocate elements, false if mag is still empty
 */
static bool
zcache_mag_fill_locked(zone_t zone, struct zcc_magazine *mag)
{
	uint32_t i = mag->zcc_magazine_index;
	uint32_t end = mag->zcc_magazine_capacity;
	vm_offset_t elem, addr;

	while (i < end && zone->countfree) {
		addr = zalloc_direct_locked(zone, Z_NOWAIT, 0);
		elem = addr & ~ZALLOC_ELEMENT_NEEDS_VALIDATION;
		if (addr & ZALLOC_ELEMENT_NEEDS_VALIDATION) {
			zone_clear_freelist_pointers(zone, elem);
		} else {
			zcache_canary_add(zone, elem);
		}
#if KASAN_ZALLOC
		kasan_poison_range(elem, zone_elem_size(zone), ASAN_HEAP_FREED);
#endif
		mag->zcc_elements[i++] = addr;
	}

	mag->zcc_magazine_index = i;

	return i != 0;
}

/*
 * zcache_mag_drain_locked
 *
 * Frees all elements in a magazine
 *
 * Parameters:
 *	zone	zone to which elements will be freed
 *	mag	pointer to magazine to empty
 *
 */
static void
zcache_mag_drain_locked(zone_t zone, struct zcc_magazine *mag)
{
	vm_offset_t elem, addr;
	bool poison;

	for (uint32_t i = 0, end = mag->zcc_magazine_index; i < end; i++) {
		addr = mag->zcc_elements[i];
		poison = addr & ZALLOC_ELEMENT_NEEDS_VALIDATION;
		elem = addr & ~ZALLOC_ELEMENT_NEEDS_VALIDATION;

#if ZALLOC_ENABLE_POISONING
		zcache_validate_element(zone, (vm_offset_t *)elem, poison);
#endif /* ZALLOC_ENABLE_POISONING */
		zfree_direct_locked(zone, elem, poison);
		mag->zcc_elements[i] = 0;
	}
	mag->zcc_magazine_index = 0;
}


/*
 * zcache_mag_has_space
 *
 * Checks if magazine still has capacity
 *
 * Parameters:
 *	mag	pointer to magazine to check
 *
 * Returns: true if magazine still has space, false if it is full
 *
 */
static bool
zcache_mag_has_space(struct zcc_magazine *mag)
{
	return mag->zcc_magazine_index < mag->zcc_magazine_capacity;
}


/*
 * zcache_mag_has_elements
 *
 * Checks if magazine is empty
 *
 * Parameters:
 *	mag	pointer to magazine to check
 *
 * Returns: true if magazine has elements, false if it is empty
 *
 */
static bool
zcache_mag_has_elements(struct zcc_magazine *mag)
{
	return mag->zcc_magazine_index > 0;
}


/*
 * zcache_swap_magazines
 *
 * Function which swaps two magazine pointers
 *
 * Parameters:
 *	a	pointer to first pointer
 *	b	pointer to second pointer
 */
static void
zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b)
{
	struct zcc_magazine *temp = *a;
	*a = *b;
	*b = temp;
}


/*
 * zcache_mag_depot_swap_for_alloc
 *
 * Swaps a full magazine into the current position
 *
 * Parameters:
 *	depot	pointer to the depot
 *	cache	pointer to the current per-cpu cache
 *
 * Precondition: Check that the depot list has full magazines
 */
static void
zcache_mag_depot_swap_for_alloc(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache)
{
	/* Loads a full magazine from which we can allocate */
	assert(depot->zcc_depot_index > 0);
	depot->zcc_depot_index--;
#if ZALLOC_DETAILED_STATS
	depot->zcc_swap++;
#endif /* ZALLOC_DETAILED_STATS */
	zcache_swap_magazines(&cache->current, &depot->zcc_depot_list[depot->zcc_depot_index]);
}


/*
 * zcache_mag_depot_swap_for_free
 *
 * Swaps an empty magazine into the current position
 *
 * Parameters:
 *	depot	pointer to the depot
 *	cache	pointer to the current per-cpu cache
 *
 * Precondition: Check that the depot list has empty magazines
 */
static void
zcache_mag_depot_swap_for_free(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache)
{
	/* Loads an empty magazine into which we can free */
	assert(depot->zcc_depot_index < depot_element_count);
	zcache_swap_magazines(&cache->current, &depot->zcc_depot_list[depot->zcc_depot_index]);
#if ZALLOC_DETAILED_STATS
	depot->zcc_swap++;
#endif /* ZALLOC_DETAILED_STATS */
	depot->zcc_depot_index++;
}

/*
 * zcache_canary_add
 *
 * Adds a canary to an element by putting zcache_canary at the first
 * and last location of the element
 *
 * Parameters:
 *	zone	zone for the element
 *	addr	element address to add canary to
 */
static void
zcache_canary_add(zone_t zone, vm_offset_t element)
{
#if ZALLOC_ENABLE_POISONING
	vm_offset_t *primary = (vm_offset_t *)element;
	vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary +
	    zone_elem_size(zone) - sizeof(vm_offset_t));
	*primary = *backup = (zcache_canary ^ (uintptr_t)element);
#else
#pragma unused(zone, element)
#endif
}
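
/*
 * Worked example with made-up values: for an element at address A with
 * zcache_canary == C, both the first and the last word of the element are set
 * to C ^ A. Because the stored value is XORed with the element's own address,
 * a canary copied from another free element will not verify, and
 * zcache_validate_and_clear_canary() below recovers C by XORing the stored
 * word with A again before comparing.
 */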

#if ZALLOC_ENABLE_POISONING
__abortlike static void
zcache_validation_panic(zone_t zone, vm_offset_t *primary, vm_offset_t *backup,
    vm_offset_t permutation)
{
	vm_offset_t primary_value = 0;
	vm_offset_t backup_value = 0;

	if (permutation == zcache_canary) {
		primary_value = *primary ^ (vm_offset_t)primary;
		backup_value = *backup ^ (vm_offset_t)primary;
		permutation = permutation ^ (vm_offset_t)primary;
	} else {
		primary_value = *primary;
		backup_value = *backup;
	}
	if (primary_value != permutation) {
		panic("Zone cache element was used after free! Element %p was corrupted at "
		    "beginning; Expected 0x%lx but found 0x%lx; canary 0x%lx; zone %p (%s%s)",
		    primary, (uintptr_t) permutation, (uintptr_t) *primary, zcache_canary, zone,
		    zone_heap_name(zone), zone->z_name);
	} else {
		panic("Zone cache element was used after free! Element %p was corrupted at end; "
		    "Expected 0x%lx but found 0x%lx; canary 0x%lx; zone %p (%s%s)",
		    primary, (uintptr_t) permutation, (uintptr_t) *backup, zcache_canary, zone,
		    zone_heap_name(zone), zone->z_name);
	}
}

/*
 * zcache_validate_and_clear_canary
 *
 * Validates an element of the zone cache to make sure it still contains the zone
 * caching canary and clears it.
 *
 * Parameters:
 *	zone	zone for the element
 *	primary	addr of canary placed in front
 *	backup	addr of canary placed at the back
 */
static void
zcache_validate_and_clear_canary(zone_t zone, vm_offset_t *primary, vm_offset_t *backup)
{
	vm_offset_t primary_value = (*primary ^ (uintptr_t)primary);
	vm_offset_t backup_value = (*backup ^ (uintptr_t)primary);

	if (primary_value == zcache_canary && backup_value == zcache_canary) {
		*primary = *backup = ZONE_POISON;
	} else {
		zcache_validation_panic(zone, primary, backup, zcache_canary);
	}
}

/*
 * zcache_validate_element
 *
 * Validates the first and last pointer-sized words of the element to ensure
 * that they haven't been altered. This function is used when an
 * element moves from cache to zone, therefore only validating the
 * first and last pointer-sized words (location of future freelist pointers).
 *
 * Parameters:
 *	zone	zone for the element
 *	element	addr of element to validate
 *	poison	has the element been poisoned
 */
static void
zcache_validate_element(zone_t zone, vm_offset_t *element, bool poison)
{
	vm_offset_t *primary = (vm_offset_t *)element;
	vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary +
	    zone_elem_size(zone) - sizeof(vm_offset_t));

	if (zone->zfree_clear_mem) {
		if (*primary == 0 && *backup == 0) {
			return;
		} else {
			zcache_validation_panic(zone, primary, backup, 0);
		}
	}

	if (__probable(!poison)) {
		zcache_validate_and_clear_canary(zone, primary, backup);
	} else {
		if (*primary == ZONE_POISON && *backup == ZONE_POISON) {
			return;
		} else {
			zcache_validation_panic(zone, primary, backup, ZONE_POISON);
		}
	}
}
#endif /* ZALLOC_ENABLE_POISONING */