/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <mach/mach_host.h>
#include <vm/vm_kern.h>


#if defined(__i386__) || defined(__x86_64__)
#include <i386/mp.h>
#endif

#if defined (__arm__) || defined (__arm64__)
#include <arm/cpu_data_internal.h>
#endif

#define DEFAULT_MAGAZINE_SIZE 8         /* Default number of elements for all magazines allocated from the magazine_zone */
#define DEFAULT_DEPOT_SIZE 8            /* Default number of elements for the array zcc_depot_list */
#define ZCC_MAX_CPU_CACHE_LINE_SIZE 64  /* We should use a platform-specific macro for this in the future; right now this is the max cache line size for all platforms */
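
/*
 * Back-of-the-envelope sizing with the defaults above (illustrative only):
 * each CPU keeps two magazines (current and previous), so the per-cpu layer
 * of one zone can hold up to MAX_CPUS * 2 * 8 cached elements, and the depot
 * layer another 8 magazines * 8 elements = 64.
 */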

lck_grp_t zcache_locks_grp;           /* lock group for depot_lock */
zone_t magazine_zone;                 /* zone to allocate zcc_magazine structs from */
uint16_t magazine_element_count = 0;  /* Size of array in magazine determined by boot-arg or default */
uint16_t depot_element_count = 0;     /* Size of depot lists determined by boot-arg or default */
bool zone_cache_ready = FALSE;        /* Flag to check if zone caching has been set up by zcache_bootstrap */
uintptr_t zcache_canary = 0;          /* Canary used for the caching layer to prevent UaF attacks */

/* The zcc_magazine is used as a stack to store cached zone elements. These
 * sets of elements can be moved around to perform bulk operations.
 */
struct zcc_magazine {
	uint32_t zcc_magazine_index;     /* Used as a stack pointer to access elements in the array */
	uint32_t zcc_magazine_capacity;  /* Number of pointers able to be stored in the zcc_elements array */
	void *zcc_elements[0];           /* Array of pointers to objects */
};
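
/*
 * Illustrative sketch of the stack discipline (added commentary): with a
 * capacity of 4, zcc_magazine_index counts the elements currently stored,
 * so it is also the slot the next push writes.
 *
 *   index = 0  ->  [ -, -, -, - ]  empty: pop is illegal, push writes slot 0
 *   index = 2  ->  [ a, b, -, - ]  push(c) writes slot 2; pop() returns b
 *   index = 4  ->  [ a, b, c, d ]  full: push is illegal
 */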


/* Each CPU will use one of these to store its elements
 */
struct zcc_per_cpu_cache {
	struct zcc_magazine *current;   /* Magazine we will always try to allocate from and free to first */
	struct zcc_magazine *previous;  /* Dedicated magazine for a quick reload and to prevent thrashing when we swap with the depot */
} __attribute__((aligned(ZCC_MAX_CPU_CACHE_LINE_SIZE)));  /* we want to align this to a cache line size so it does not thrash when multiple cpus want to access their caches in parallel */


/*
 * The depot layer can be invalid while zone_gc() is draining it out.
 * During that time, the CPU caches are active. For CPU magazine allocs and
 * frees, the caching layer reaches directly into the zone allocator.
 */
#define ZCACHE_DEPOT_INVALID -1
#define zcache_depot_available(zcache) (zcache->zcc_depot_index != ZCACHE_DEPOT_INVALID)

/* This is the basic struct to take care of caching and is included within
 * the zone.
 */
struct zone_cache {
	lck_mtx_t zcc_depot_lock;                               /* Lock for the depot layer of caching */
	struct zcc_per_cpu_cache zcc_per_cpu_caches[MAX_CPUS];  /* An array of caches, one for each CPU */
	int zcc_depot_index;                                    /* marks the point in the array where empty magazines begin */
	struct zcc_magazine *zcc_depot_list[0];                 /* Stores full and empty magazines in the depot layer */
};
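
/*
 * Overview of the caching hierarchy (descriptive comment, added for clarity):
 *
 *   zalloc/zfree fast path
 *         |
 *         v
 *   zcc_per_cpu_caches[cpu]   current and previous magazines; lock-free,
 *         |                   accessed with preemption disabled
 *         v
 *   zcc_depot_list            full magazines in [0, zcc_depot_index),
 *         |                   empty ones after; guarded by zcc_depot_lock
 *         v
 *   zone allocator            zalloc_attempt()/zfree_direct() under the
 *                             zone lock when the caches cannot help
 */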


void zcache_init_marked_zones(void);
bool zcache_mag_fill(zone_t zone, struct zcc_magazine *mag);
void zcache_mag_drain(zone_t zone, struct zcc_magazine *mag);
void zcache_mag_init(struct zcc_magazine *mag, int count);
void *zcache_mag_pop(struct zcc_magazine *mag);
void zcache_mag_push(struct zcc_magazine *mag, void *elem);
bool zcache_mag_has_space(struct zcc_magazine *mag);
bool zcache_mag_has_elements(struct zcc_magazine *mag);
void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b);
void zcache_mag_depot_swap_for_alloc(struct zone_cache *depot, struct zcc_per_cpu_cache *cache);
void zcache_mag_depot_swap_for_free(struct zone_cache *depot, struct zcc_per_cpu_cache *cache);
void zcache_mag_depot_swap(struct zone_cache *depot, struct zcc_per_cpu_cache *cache, boolean_t load_full);
void zcache_canary_add(zone_t zone, void *addr);
void zcache_canary_validate(zone_t zone, void *addr);

/*
 * zcache_ready
 *
 * Description: returns whether or not the zone caches are ready to use
 *
 */
bool
zcache_ready(void)
{
	return zone_cache_ready;
}

/*
 * zcache_init_marked_zones
 *
 * Description: Initializes all parts of the per-cpu caches for the list of
 *              marked zones once we are able to initialize caches. This should
 *              only be called once, and will be called during the time that the
 *              system is single threaded so we don't have to take the lock.
 *
 */
void
zcache_init_marked_zones(void)
{
	unsigned int i;
	for (i = 0; i < num_zones; i++) {
		if (zone_array[i].cpu_cache_enable_when_ready) {
			zcache_init(&zone_array[i]);
			zone_array[i].cpu_cache_enable_when_ready = FALSE;
		}
	}
}

/*
 * zcache_bootstrap
 *
 * Description: initializes zone to allocate magazines from and sets
 *              magazine_element_count and depot_element_count from
 *              boot-args or default values
 *
 */
void
zcache_bootstrap(void)
{
	/* use boot-arg for custom magazine size */
	if (!PE_parse_boot_argn("zcc_magazine_element_count", &magazine_element_count, sizeof(uint16_t))) {
		magazine_element_count = DEFAULT_MAGAZINE_SIZE;
	}

	int magazine_size = sizeof(struct zcc_magazine) + magazine_element_count * sizeof(void *);

	magazine_zone = zinit(magazine_size, 100000 * magazine_size, magazine_size, "zcc_magazine_zone");

	assert(magazine_zone != NULL);

	/* use boot-arg for custom depot size */
	if (!PE_parse_boot_argn("zcc_depot_element_count", &depot_element_count, sizeof(uint16_t))) {
		depot_element_count = DEFAULT_DEPOT_SIZE;
	}

	lck_grp_init(&zcache_locks_grp, "zcc_depot_lock", LCK_GRP_ATTR_NULL);

	/* Generate the canary value for zone caches */
	zcache_canary = (uintptr_t) early_random();

	zone_cache_ready = TRUE;

	zcache_init_marked_zones();
}
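
/*
 * Example (illustrative values): both sizes can be tuned from the boot
 * command line using the boot-args parsed above, e.g.
 *
 *   zcc_magazine_element_count=16 zcc_depot_element_count=16
 *
 * which makes every magazine hold 16 elements and every depot hold 16
 * magazines.
 */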


/*
 * zcache_init
 *
 * Description: Initializes all parts of the per-cpu caches for a given zone
 *
 * Parameters:  zone    pointer to zone on which to initialize caching
 *
 */
void
zcache_init(zone_t zone)
{
	int i;                          /* used as index in for loops */
	vm_size_t total_size;           /* Used for allocating the zone_cache struct with the proper size of depot list */
	struct zone_cache *temp_cache;  /* Temporary variable to initialize a zone_cache before assigning to the specified zone */

	/* Allocate chunk of memory for all structs */
	total_size = sizeof(struct zone_cache) + (depot_element_count * sizeof(void *));

	temp_cache = (struct zone_cache *) kalloc(total_size);


	/* Initialize a cache for every CPU */
	for (i = 0; i < MAX_CPUS; i++) {
		temp_cache->zcc_per_cpu_caches[i].current = (struct zcc_magazine *)zalloc(magazine_zone);
		temp_cache->zcc_per_cpu_caches[i].previous = (struct zcc_magazine *)zalloc(magazine_zone);

		assert(temp_cache->zcc_per_cpu_caches[i].current != NULL && temp_cache->zcc_per_cpu_caches[i].previous != NULL);

		zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].current, magazine_element_count);
		zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].previous, magazine_element_count);
	}

	/* Initialize the lock on the depot layer */
	lck_mtx_init(&(temp_cache->zcc_depot_lock), &zcache_locks_grp, LCK_ATTR_NULL);

	/* Initialize empty magazines in the depot list */
	for (i = 0; i < depot_element_count; i++) {
		temp_cache->zcc_depot_list[i] = (struct zcc_magazine *)zalloc(magazine_zone);

		assert(temp_cache->zcc_depot_list[i] != NULL);

		zcache_mag_init(temp_cache->zcc_depot_list[i], magazine_element_count);
	}

	temp_cache->zcc_depot_index = 0;

	lock_zone(zone);
	zone->zcache = temp_cache;
	/* Set flag to know caching is enabled */
	zone->cpu_cache_enabled = TRUE;
	unlock_zone(zone);
	return;
}

/*
 * zcache_drain_depot
 *
 * Description: Frees all the full magazines from the depot layer to the zone allocator as part
 *              of zone_gc(). The routine assumes that only one zone_gc() is in progress (zone_gc_lock
 *              ensures that)
 *
 * Parameters:  zone    pointer to zone for which the depot layer needs to be drained
 *
 * Returns: None
 *
 */
void
zcache_drain_depot(zone_t zone)
{
	struct zone_cache *zcache = zone->zcache;
	int drain_depot_index = 0;

	/*
	 * Grab the current depot list from the zone cache. If it has full magazines,
	 * mark the depot as invalid and drain it.
	 */
	lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
	if (!zcache_depot_available(zcache) || (zcache->zcc_depot_index == 0)) {
		/* no full magazines in the depot or depot unavailable; nothing to drain here */
		lck_mtx_unlock(&(zcache->zcc_depot_lock));
		return;
	}
	drain_depot_index = zcache->zcc_depot_index;
	/* Mark the depot as unavailable */
	zcache->zcc_depot_index = ZCACHE_DEPOT_INVALID;
	lck_mtx_unlock(&(zcache->zcc_depot_lock));

	/* Now drain the full magazines in the depot */
	for (int i = 0; i < drain_depot_index; i++) {
		zcache_mag_drain(zone, zcache->zcc_depot_list[i]);
	}

	lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
	/* Mark the depot as available again */
	zcache->zcc_depot_index = 0;
	lck_mtx_unlock(&(zcache->zcc_depot_lock));
}
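
/*
 * Note (added commentary): the depot is marked ZCACHE_DEPOT_INVALID and the
 * depot lock is dropped before any magazine is drained, because
 * zcache_mag_drain() takes the zone lock for each zfree_direct(). While the
 * depot is invalid, the per-cpu caches simply bypass it and deal with the
 * zone allocator directly (see zcache_depot_available()).
 */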


/*
 * zcache_free_to_cpu_cache
 *
 * Description: Checks per-cpu caches to free element there if possible
 *
 * Parameters:  zone    pointer to zone the element comes from
 *              addr    pointer to element to free
 *
 * Returns: TRUE if successful, FALSE otherwise
 *
 * Precondition: check that caching is enabled for zone
 */
bool
zcache_free_to_cpu_cache(zone_t zone, void *addr)
{
	int curcpu;                               /* Current cpu is used to index into array of zcc_per_cpu_cache structs */
	struct zone_cache *zcache;                /* local storage of the zone's cache */
	struct zcc_per_cpu_cache *per_cpu_cache;  /* locally store the current per_cpu_cache */

	disable_preemption();
	curcpu = current_processor()->cpu_id;
	zcache = zone->zcache;
	per_cpu_cache = &zcache->zcc_per_cpu_caches[curcpu];

	if (zcache_mag_has_space(per_cpu_cache->current)) {
		/* If able, free into current magazine */
		goto free_to_current;
	} else if (zcache_mag_has_space(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
		goto free_to_current;
	} else {
		lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
		if (zcache_depot_available(zcache) && (zcache->zcc_depot_index < depot_element_count)) {
			/* If able, rotate in a new empty magazine from the depot and retry */
			zcache_mag_depot_swap_for_free(zcache, per_cpu_cache);
			lck_mtx_unlock(&(zcache->zcc_depot_lock));
			goto free_to_current;
		}
		lck_mtx_unlock(&(zcache->zcc_depot_lock));
		/* Attempt to free an entire magazine of elements */
		zcache_mag_drain(zone, per_cpu_cache->current);
		if (zcache_mag_has_space(per_cpu_cache->current)) {
			goto free_to_current;
		}
	}

	/* If not able to use cache return FALSE and fall through to zfree */
	enable_preemption();
	return FALSE;

free_to_current:
	assert(zcache_mag_has_space(per_cpu_cache->current));
	zcache_canary_add(zone, addr);
	zcache_mag_push(per_cpu_cache->current, addr);

#if KASAN_ZALLOC
	kasan_poison_range((vm_offset_t)addr, zone->elem_size, ASAN_HEAP_FREED);
#endif

	enable_preemption();
	return TRUE;
}


/*
 * zcache_alloc_from_cpu_cache
 *
 * Description: Checks per-cpu caches to allocate element from there if possible
 *
 * Parameters:  zone    pointer to zone from which the element will come
 *
 * Returns: pointer to usable element
 *
 * Precondition: check that caching is enabled for zone
 */
vm_offset_t
zcache_alloc_from_cpu_cache(zone_t zone)
{
	int curcpu;                               /* Current cpu is used to index into array of zcc_per_cpu_cache structs */
	void *ret = NULL;                         /* Points to the element which will be returned */
	struct zone_cache *zcache;                /* local storage of the zone's cache */
	struct zcc_per_cpu_cache *per_cpu_cache;  /* locally store the current per_cpu_cache */

	disable_preemption();
	curcpu = current_processor()->cpu_id;
	zcache = zone->zcache;
	per_cpu_cache = &zcache->zcc_per_cpu_caches[curcpu];

	if (zcache_mag_has_elements(per_cpu_cache->current)) {
		/* If able, allocate from current magazine */
		goto allocate_from_current;
	} else if (zcache_mag_has_elements(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
		goto allocate_from_current;
	} else {
		lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
		if (zcache_depot_available(zcache) && (zcache->zcc_depot_index > 0)) {
			/* If able, rotate in a full magazine from the depot */
			zcache_mag_depot_swap_for_alloc(zcache, per_cpu_cache);
			lck_mtx_unlock(&(zcache->zcc_depot_lock));
			goto allocate_from_current;
		}
		lck_mtx_unlock(&(zcache->zcc_depot_lock));
		/* Attempt to allocate an entire magazine of elements */
		if (zcache_mag_fill(zone, per_cpu_cache->current)) {
			goto allocate_from_current;
		}
	}

	/* If unable to allocate from cache return NULL and fall through to zalloc */
	enable_preemption();
	return (vm_offset_t) NULL;

allocate_from_current:
	ret = zcache_mag_pop(per_cpu_cache->current);
	assert(ret != NULL);
	zcache_canary_validate(zone, ret);

#if KASAN_ZALLOC
	kasan_poison_range((vm_offset_t)ret, zone->elem_size, ASAN_VALID);
#endif

	enable_preemption();
	return (vm_offset_t) ret;
}
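
/*
 * Both fast paths walk the same hierarchy in mirror image: current magazine,
 * then previous, then the depot, and finally the zone itself. A hypothetical
 * caller (sketch only, not the actual zalloc integration) would look like:
 */
#if 0 /* illustrative sketch, never compiled */
static vm_offset_t
example_zalloc_fast(zone_t zone)
{
	vm_offset_t addr = 0;

	if (zone->cpu_cache_enabled) {
		/* Returns NULL when every layer of the cache is empty */
		addr = zcache_alloc_from_cpu_cache(zone);
	}
	if (addr == 0) {
		/* Fall back to the regular zone allocator (elided) */
	}
	return addr;
}
#endif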


/*
 * zcache_mag_init
 *
 * Description: initializes fields in a zcc_magazine struct
 *
 * Parameters:  mag     pointer to magazine to initialize
 *              count   capacity of the magazine, in elements
 *
 */
void
zcache_mag_init(struct zcc_magazine *mag, int count)
{
	mag->zcc_magazine_index = 0;
	mag->zcc_magazine_capacity = count;
}


/*
 * zcache_mag_fill
 *
 * Description: fills a magazine with as many elements as the zone can give
 *              without blocking to carve out more memory
 *
 * Parameters:  zone    zone from which to allocate
 *              mag     pointer to magazine to fill
 *
 * Return: True if able to allocate elements, false if mag is still empty
 */
bool
zcache_mag_fill(zone_t zone, struct zcc_magazine *mag)
{
	assert(mag->zcc_magazine_index == 0);
	void* elem = NULL;
	uint32_t i;
	lock_zone(zone);
	for (i = mag->zcc_magazine_index; i < mag->zcc_magazine_capacity; i++) {
		elem = zalloc_attempt(zone);
		if (elem) {
			zcache_canary_add(zone, elem);
			zcache_mag_push(mag, elem);
#if KASAN_ZALLOC
			kasan_poison_range((vm_offset_t)elem, zone->elem_size, ASAN_HEAP_FREED);
#endif
		} else {
			break;
		}
	}
	unlock_zone(zone);
	if (i == 0) {
		return FALSE;
	}
	return TRUE;
}

/*
 * zcache_mag_drain
 *
 * Description: frees all elements in a magazine
 *
 * Parameters:  zone    zone to which elements will be freed
 *              mag     pointer to magazine to empty
 *
 */
void
zcache_mag_drain(zone_t zone, struct zcc_magazine *mag)
{
	assert(mag->zcc_magazine_index == mag->zcc_magazine_capacity);
	lock_zone(zone);
	while (mag->zcc_magazine_index > 0) {
		uint32_t index = --mag->zcc_magazine_index;
		zcache_canary_validate(zone, mag->zcc_elements[index]);
		zfree_direct(zone, (vm_offset_t)mag->zcc_elements[index]);
		mag->zcc_elements[mag->zcc_magazine_index] = 0;
	}
	unlock_zone(zone);
}

/*
 * zcache_mag_pop
 *
 * Description: removes last element from magazine in a stack pop fashion.
 *              zcc_magazine_index represents the number of elements on the
 *              stack, so it is the index of where to save the next element;
 *              when full, it will be 1 past the last index of the array
 *
 * Parameters:  mag     pointer to magazine from which to remove element
 *
 * Returns: pointer to element removed from magazine
 *
 * Precondition: must check that magazine is not empty before calling
 */
void *
zcache_mag_pop(struct zcc_magazine *mag)
{
	void *elem;
	assert(zcache_mag_has_elements(mag));
	elem = mag->zcc_elements[--mag->zcc_magazine_index];
	/* Ensure pointer to element cannot be accessed after we pop it */
	mag->zcc_elements[mag->zcc_magazine_index] = NULL;
	assert(elem != NULL);
	return elem;
}


/*
 * zcache_mag_push
 *
 * Description: adds element to magazine and increments zcc_magazine_index.
 *              zcc_magazine_index represents the number of elements on the
 *              stack, so it is the index of where to save the next element;
 *              when full, it will be 1 past the last index of the array
 *
 * Parameters:  mag     pointer to magazine to which to add element
 *              elem    pointer to element to add
 *
 * Precondition: must check that magazine is not full before calling
 */
void
zcache_mag_push(struct zcc_magazine *mag, void *elem)
{
	assert(zcache_mag_has_space(mag));
	mag->zcc_elements[mag->zcc_magazine_index++] = elem;
}


/*
 * zcache_mag_has_space
 *
 * Description: checks if magazine still has capacity
 *
 * Parameters:  mag     pointer to magazine to check
 *
 * Returns: true if magazine still has space for more elements
 *
 */
bool
zcache_mag_has_space(struct zcc_magazine *mag)
{
	return mag->zcc_magazine_index < mag->zcc_magazine_capacity;
}


/*
 * zcache_mag_has_elements
 *
 * Description: checks if magazine has any elements
 *
 * Parameters:  mag     pointer to magazine to check
 *
 * Returns: true if magazine has at least one element
 *
 */
bool
zcache_mag_has_elements(struct zcc_magazine *mag)
{
	return mag->zcc_magazine_index > 0;
}


/*
 * zcache_swap_magazines
 *
 * Description: Function which swaps two pointers of any type
 *
 * Parameters:  a       pointer to first pointer
 *              b       pointer to second pointer
 */
void
zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b)
{
	struct zcc_magazine *temp = *a;
	*a = *b;
	*b = temp;
}


/*
 * zcache_mag_depot_swap_for_alloc
 *
 * Description: Swaps a full magazine into the current position
 *
 * Parameters:  zcache  pointer to the zone_cache to access the depot
 *              cache   pointer to the current per-cpu cache
 *
 * Precondition: Check that the depot list has at least one full magazine
 */
void
zcache_mag_depot_swap_for_alloc(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache)
{
	/* Loads a full magazine from which we can allocate */
	assert(zcache_depot_available(zcache));
	assert(zcache->zcc_depot_index > 0);
	zcache->zcc_depot_index--;
	zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]);
}


/*
 * zcache_mag_depot_swap_for_free
 *
 * Description: Swaps an empty magazine into the current position
 *
 * Parameters:  zcache  pointer to the zone_cache to access the depot
 *              cache   pointer to the current per-cpu cache
 *
 * Precondition: Check that the depot list has at least one empty magazine
 */
void
zcache_mag_depot_swap_for_free(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache)
{
	/* Loads an empty magazine into which we can free */
	assert(zcache_depot_available(zcache));
	assert(zcache->zcc_depot_index < depot_element_count);
	zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]);
	zcache->zcc_depot_index++;
}
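
/*
 * Depot invariant (added commentary): slots [0, zcc_depot_index) always hold
 * full magazines and slots [zcc_depot_index, depot_element_count) hold empty
 * ones. For example, with depot_element_count == 4 and zcc_depot_index == 2:
 *
 *   [ full, full, empty, empty ]
 *           ^ swap_for_alloc takes from here (index becomes 1)
 *                 ^ swap_for_free fills this slot (index becomes 3)
 *
 * Both swaps trade the caller's current magazine for the depot's, so the
 * counts of full and empty magazines are conserved.
 */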

/*
 * zcache_canary_add
 *
 * Description: Adds a canary to an element by putting zcache_canary at the first
 *              and last location of the element
 *
 * Parameters:  zone    zone for the element
 *              element element address to add canary to
 *
 */
void
zcache_canary_add(zone_t zone, void *element)
{
	vm_offset_t *primary = (vm_offset_t *)element;
	vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t));
	*primary = *backup = (zcache_canary ^ (uintptr_t)element);
}
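
/*
 * Worked example (illustrative values only): if zcache_canary is 0xA5A5 and
 * the element lives at 0x1000, both canary words store
 *
 *   0xA5A5 ^ 0x1000 = 0xB5A5
 *
 * Mixing the element address into the canary means a stored word copied from
 * one freed element into another will not validate. Validation recovers the
 * check by XORing the stored word with the element address again:
 *
 *   0xB5A5 ^ 0x1000 = 0xA5A5  (matches zcache_canary, element is intact)
 */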

/*
 * zcache_canary_validate
 *
 * Description: Validates an element of the zone cache to make sure it still contains the zone
 *              caching canary.
 *
 * Parameters:  zone    zone for the element
 *              element element address to validate
 *
 */
void
zcache_canary_validate(zone_t zone, void *element)
{
	vm_offset_t *primary = (vm_offset_t *)element;
	vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t));

	vm_offset_t primary_value = (*primary ^ (uintptr_t)element);
	if (primary_value != zcache_canary) {
		panic("Zone cache element was used after free! Element %p was corrupted at beginning; Expected %p but found %p; canary %p; zone %p (%s)",
		    element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*primary), (void *)zcache_canary, zone, zone->zone_name);
	}

	vm_offset_t backup_value = (*backup ^ (uintptr_t)element);
	if (backup_value != zcache_canary) {
		panic("Zone cache element was used after free! Element %p was corrupted at end; Expected %p but found %p; canary %p; zone %p (%s)",
		    element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*backup), (void *)zcache_canary, zone, zone->zone_name);
	}
}