]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/zcache.c
xnu-4903.270.47.tar.gz
[apple/xnu.git] / osfmk / kern / zcache.c
CommitLineData
d9a64523
A
1/*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <kern/assert.h>
30#include <kern/cpu_data.h>
31#include <mach/mach_host.h>
32#include <vm/vm_kern.h>
33
34
35#if defined(__i386__) || defined(__x86_64__)
36#include <i386/mp.h>
37#endif
38
39#if defined (__arm__) || defined (__arm64__)
40#include <arm/cpu_data_internal.h>
41#endif
42
0a7de745
A
/* Tunables and file-scope state for the zone caching layer. */
#define DEFAULT_MAGAZINE_SIZE 8         /* Default number of elements for all magazines allocated from the magazine_zone */
#define DEFAULT_DEPOT_SIZE 8            /* Default number of elements for the array zcc_depot_list */
#define ZCC_MAX_CPU_CACHE_LINE_SIZE 64  /* We should use a platform specific macro for this in the future, right now this is the max cache line size for all platforms */

lck_grp_t zcache_locks_grp;             /* lock group for depot_lock */
zone_t magazine_zone;                   /* zone to allocate zcc_magazine structs from */
uint16_t magazine_element_count = 0;    /* Size of array in magazine determined by boot-arg or default */
uint16_t depot_element_count = 0;       /* Size of depot lists determined by boot-arg or default */
bool zone_cache_ready = FALSE;          /* Flag to check if zone caching has been set up by zcache_bootstrap */
uintptr_t zcache_canary = 0;            /* Canary used for the caching layer to prevent UaF attacks */
d9a64523
A
53
/* The zcc_magazine is used as a stack to store cached zone elements. These
 * sets of elements can be moved around to perform bulk operations.
 */
struct zcc_magazine {
	uint32_t zcc_magazine_index;            /* Used as a stack pointer to access elements in the array; equals the current element count */
	uint32_t zcc_magazine_capacity;         /* Number of pointers able to be stored in the zcc_elements array */
	void *zcc_elements[0];                  /* Array of pointers to objects; storage is allocated inline after the struct */
};
62
63
0a7de745
A
/* Each CPU will use one of these to store its elements
 */
struct zcc_per_cpu_cache {
	struct zcc_magazine *current;           /* Magazine from which we will always try to allocate from and free to first */
	struct zcc_magazine *previous;          /* Dedicated magazine for a quick reload and to prevent thrashing when we swap with the depot */
} __attribute__((aligned(ZCC_MAX_CPU_CACHE_LINE_SIZE))); /* we want to align this to a cache line size so it does not thrash when multiple cpus want to access their caches in parallel */
d9a64523
A
70
71
/*
 * The depot layer can be invalid while zone_gc() is draining it out.
 * During that time, the CPU caches are active. For CPU magazine allocs and
 * frees, the caching layer reaches directly into the zone allocator.
 */
#define ZCACHE_DEPOT_INVALID                    -1
/* True when the depot is usable, i.e. not currently being drained by zone_gc() */
#define zcache_depot_available(zcache)          (zcache->zcc_depot_index != ZCACHE_DEPOT_INVALID)
d9a64523
A
79
/* This is the basic struct to take care of caching and is included within
 * the zone.
 */
struct zone_cache {
	lck_mtx_t zcc_depot_lock;                               /* Lock for the depot layer of caching */
	struct zcc_per_cpu_cache zcc_per_cpu_caches[MAX_CPUS];  /* An array of caches, one for each CPU */
	int zcc_depot_index;                                    /* marks the point in the array where empty magazines begin; ZCACHE_DEPOT_INVALID while draining */
	struct zcc_magazine *zcc_depot_list[0];                 /* Stores full and empty magazines in the depot layer; sized at zcache_init by depot_element_count */
};
89
90
/* Internal forward declarations for the caching layer. */
void zcache_init_marked_zones(void);
bool zcache_mag_fill(zone_t zone, struct zcc_magazine *mag);
void zcache_mag_drain(zone_t zone, struct zcc_magazine *mag);
void zcache_mag_init(struct zcc_magazine *mag, int count);
void *zcache_mag_pop(struct zcc_magazine *mag);
void zcache_mag_push(struct zcc_magazine *mag, void *elem);
bool zcache_mag_has_space(struct zcc_magazine *mag);
bool zcache_mag_has_elements(struct zcc_magazine *mag);
void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b);
void zcache_mag_depot_swap_for_alloc(struct zone_cache *depot, struct zcc_per_cpu_cache *cache);
void zcache_mag_depot_swap_for_free(struct zone_cache *depot, struct zcc_per_cpu_cache *cache);
/* NOTE(review): zcache_mag_depot_swap has no definition visible in this file — confirm it still exists */
void zcache_mag_depot_swap(struct zone_cache *depot, struct zcc_per_cpu_cache *cache, boolean_t load_full);
void zcache_canary_add(zone_t zone, void *addr);
void zcache_canary_validate(zone_t zone, void *addr);
105
106/*
107 * zcache_ready
108 *
109 * Description: returns whether or not the zone caches are ready to use
110 *
111 */
0a7de745
A
112bool
113zcache_ready(void)
114{
d9a64523
A
115 return zone_cache_ready;
116}
117
118/*
119 * zcache_init_marked_zones
120 *
121 * Description: Initializes all parts of the per-cpu caches for the list of
122 * marked zones once we are able to initalize caches. This should
123 * only be called once, and will be called during the time that the
124 * system is single threaded so we don't have to take the lock.
125 *
126 */
0a7de745
A
127void
128zcache_init_marked_zones(void)
129{
d9a64523 130 unsigned int i;
0a7de745
A
131 for (i = 0; i < num_zones; i++) {
132 if (zone_array[i].cpu_cache_enable_when_ready) {
d9a64523
A
133 zcache_init(&zone_array[i]);
134 zone_array[i].cpu_cache_enable_when_ready = FALSE;
135 }
136 }
137}
138
139/*
140 * zcache_bootstrap
141 *
142 * Description: initializes zone to allocate magazines from and sets
143 * magazine_element_count and depot_element_count from
144 * boot-args or default values
145 *
146 */
0a7de745
A
147void
148zcache_bootstrap(void)
d9a64523
A
149{
150 /* use boot-arg for custom magazine size*/
0a7de745 151 if (!PE_parse_boot_argn("zcc_magazine_element_count", &magazine_element_count, sizeof(uint16_t))) {
d9a64523 152 magazine_element_count = DEFAULT_MAGAZINE_SIZE;
0a7de745 153 }
d9a64523
A
154
155 int magazine_size = sizeof(struct zcc_magazine) + magazine_element_count * sizeof(void *);
156
0a7de745 157 magazine_zone = zinit(magazine_size, 100000 * magazine_size, magazine_size, "zcc_magazine_zone");
d9a64523
A
158
159 assert(magazine_zone != NULL);
160
161 /* use boot-arg for custom depot size*/
0a7de745 162 if (!PE_parse_boot_argn("zcc_depot_element_count", &depot_element_count, sizeof(uint16_t))) {
d9a64523 163 depot_element_count = DEFAULT_DEPOT_SIZE;
0a7de745 164 }
d9a64523
A
165
166 lck_grp_init(&zcache_locks_grp, "zcc_depot_lock", LCK_GRP_ATTR_NULL);
167
168 /* Generate the canary value for zone caches */
169 zcache_canary = (uintptr_t) early_random();
170
171 zone_cache_ready = TRUE;
172
173 zcache_init_marked_zones();
174}
175
176
177/*
178 * zcache_init
179 *
180 * Description: Initializes all parts of the per-cpu caches for a given zone
181 *
182 * Parameters: zone pointer to zone on which to iniitalize caching
183 *
184 */
0a7de745
A
185void
186zcache_init(zone_t zone)
187{
188 int i; /* used as index in for loops */
189 vm_size_t total_size; /* Used for allocating the zone_cache struct with the proper size of depot list */
190 struct zone_cache *temp_cache; /* Temporary variable to initialize a zone_cache before assigning to the specified zone */
d9a64523
A
191
192 /* Allocate chunk of memory for all structs */
193 total_size = sizeof(struct zone_cache) + (depot_element_count * sizeof(void *));
0a7de745 194
d9a64523
A
195 temp_cache = (struct zone_cache *) kalloc(total_size);
196
197
0a7de745
A
198 /* Initialize a cache for every CPU */
199 for (i = 0; i < MAX_CPUS; i++) {
200 temp_cache->zcc_per_cpu_caches[i].current = (struct zcc_magazine *)zalloc(magazine_zone);
201 temp_cache->zcc_per_cpu_caches[i].previous = (struct zcc_magazine *)zalloc(magazine_zone);
d9a64523 202
0a7de745 203 assert(temp_cache->zcc_per_cpu_caches[i].current != NULL && temp_cache->zcc_per_cpu_caches[i].previous != NULL);
d9a64523 204
0a7de745
A
205 zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].current, magazine_element_count);
206 zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].previous, magazine_element_count);
207 }
d9a64523 208
0a7de745
A
209 /* Initialize the lock on the depot layer */
210 lck_mtx_init(&(temp_cache->zcc_depot_lock), &zcache_locks_grp, LCK_ATTR_NULL);
d9a64523
A
211
212 /* Initialize empty magazines in the depot list */
213 for (i = 0; i < depot_element_count; i++) {
214 temp_cache->zcc_depot_list[i] = (struct zcc_magazine *)zalloc(magazine_zone);
215
216 assert(temp_cache->zcc_depot_list[i] != NULL);
217
218 zcache_mag_init(temp_cache->zcc_depot_list[i], magazine_element_count);
219 }
220
221 temp_cache->zcc_depot_index = 0;
222
0a7de745 223 lock_zone(zone);
d9a64523 224 zone->zcache = temp_cache;
0a7de745
A
225 /* Set flag to know caching is enabled */
226 zone->cpu_cache_enabled = TRUE;
227 unlock_zone(zone);
228 return;
229}
d9a64523
A
230
231/*
232 * zcache_drain_depot
233 *
234 * Description: Frees all the full magazines from the depot layer to the zone allocator as part
235 * of zone_gc(). The routine assumes that only one zone_gc() is in progress (zone_gc_lock
236 * ensures that)
237 *
238 * Parameters: zone pointer to zone for which the depot layer needs to be drained
239 *
240 * Returns: None
241 *
242 */
0a7de745
A
243void
244zcache_drain_depot(zone_t zone)
d9a64523
A
245{
246 struct zone_cache *zcache = zone->zcache;
247 int drain_depot_index = 0;
248
249 /*
0a7de745 250 * Grab the current depot list from the zone cache. If it has full magazines,
d9a64523
A
251 * mark the depot as invalid and drain it.
252 */
253 lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
254 if (!zcache_depot_available(zcache) || (zcache->zcc_depot_index == 0)) {
255 /* no full magazines in the depot or depot unavailable; nothing to drain here */
256 lck_mtx_unlock(&(zcache->zcc_depot_lock));
257 return;
258 }
259 drain_depot_index = zcache->zcc_depot_index;
260 /* Mark the depot as unavailable */
261 zcache->zcc_depot_index = ZCACHE_DEPOT_INVALID;
262 lck_mtx_unlock(&(zcache->zcc_depot_lock));
263
264 /* Now drain the full magazines in the depot */
0a7de745 265 for (int i = 0; i < drain_depot_index; i++) {
d9a64523 266 zcache_mag_drain(zone, zcache->zcc_depot_list[i]);
0a7de745 267 }
d9a64523
A
268
269 lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
270 /* Mark the depot as available again */
271 zcache->zcc_depot_index = 0;
272 lck_mtx_unlock(&(zcache->zcc_depot_lock));
273}
274
275
/*
 * zcache_free_to_cpu_cache
 *
 * Description: Checks per-cpu caches to free element there if possible.
 *              Tries, in order: the current magazine, the previous magazine
 *              (after a swap), an empty magazine rotated in from the depot,
 *              and finally draining the current magazine back to the zone
 *              to make room. Preemption is disabled for the whole operation
 *              so the cpu_id (and thus the per-cpu cache) stays stable.
 *
 * Parameters:  zone    pointer to zone for which element comes from
 *              addr    pointer to element to free
 *
 * Returns:     TRUE if successful, FALSE otherwise (caller falls through to zfree)
 *
 * Precondition: check that caching is enabled for zone
 */
bool
zcache_free_to_cpu_cache(zone_t zone, void *addr)
{
	int curcpu;                              /* Current cpu is used to index into array of zcc_per_cpu_cache structs */
	struct zone_cache *zcache;               /* local storage of the zone's cache */
	struct zcc_per_cpu_cache *per_cpu_cache; /* locally store the current per_cpu_cache */

	disable_preemption();
	curcpu = current_processor()->cpu_id;
	zcache = zone->zcache;
	per_cpu_cache = &zcache->zcc_per_cpu_caches[curcpu];

	if (zcache_mag_has_space(per_cpu_cache->current)) {
		/* If able, free into current magazine */
		goto free_to_current;
	} else if (zcache_mag_has_space(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
		goto free_to_current;
	} else {
		lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
		if (zcache_depot_available(zcache) && (zcache->zcc_depot_index < depot_element_count)) {
			/* If able, rotate in a new empty magazine from the depot and retry */
			zcache_mag_depot_swap_for_free(zcache, per_cpu_cache);
			lck_mtx_unlock(&(zcache->zcc_depot_lock));
			goto free_to_current;
		}
		lck_mtx_unlock(&(zcache->zcc_depot_lock));
		/* Attempt to free an entire magazine of elements */
		zcache_mag_drain(zone, per_cpu_cache->current);
		if (zcache_mag_has_space(per_cpu_cache->current)) {
			goto free_to_current;
		}
	}

	/* If not able to use cache return FALSE and fall through to zfree */
	enable_preemption();
	return FALSE;

free_to_current:
	assert(zcache_mag_has_space(per_cpu_cache->current));
	/* Stamp the canary so a use-after-free while cached is detectable */
	zcache_canary_add(zone, addr);
	zcache_mag_push(per_cpu_cache->current, addr);

#if KASAN_ZALLOC
	/* Element is logically freed while it sits in the magazine */
	kasan_poison_range((vm_offset_t)addr, zone->elem_size, ASAN_HEAP_FREED);
#endif

	enable_preemption();
	return TRUE;
}
339
340
/*
 * zcache_alloc_from_cpu_cache
 *
 * Description: Checks per-cpu caches to allocate element from there if possible.
 *              Mirror of zcache_free_to_cpu_cache: tries the current magazine,
 *              the previous magazine (after a swap), a full magazine rotated in
 *              from the depot, and finally refilling the current magazine from
 *              the zone. Preemption is disabled for the whole operation so the
 *              cpu_id (and thus the per-cpu cache) stays stable.
 *
 * Parameters:  zone    pointer to zone for which element will come from
 *
 * Returns:     pointer to usable element, or NULL (caller falls through to zalloc)
 *
 * Precondition: check that caching is enabled for zone
 */
vm_offset_t
zcache_alloc_from_cpu_cache(zone_t zone)
{
	int curcpu;                              /* Current cpu is used to index into array of zcc_per_cpu_cache structs */
	void *ret = NULL;                        /* Points to the element which will be returned */
	struct zone_cache *zcache;               /* local storage of the zone's cache */
	struct zcc_per_cpu_cache *per_cpu_cache; /* locally store the current per_cpu_cache */

	disable_preemption();
	curcpu = current_processor()->cpu_id;
	zcache = zone->zcache;
	per_cpu_cache = &zcache->zcc_per_cpu_caches[curcpu];

	if (zcache_mag_has_elements(per_cpu_cache->current)) {
		/* If able, allocate from current magazine */
		goto allocate_from_current;
	} else if (zcache_mag_has_elements(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
		goto allocate_from_current;
	} else {
		lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock));
		if (zcache_depot_available(zcache) && (zcache->zcc_depot_index > 0)) {
			/* If able, rotate in a full magazine from the depot */
			zcache_mag_depot_swap_for_alloc(zcache, per_cpu_cache);
			lck_mtx_unlock(&(zcache->zcc_depot_lock));
			goto allocate_from_current;
		}
		lck_mtx_unlock(&(zcache->zcc_depot_lock));
		/* Attempt to allocate an entire magazine of elements */
		if (zcache_mag_fill(zone, per_cpu_cache->current)) {
			goto allocate_from_current;
		}
	}

	/* If unable to allocate from cache return NULL and fall through to zalloc */
	enable_preemption();
	return (vm_offset_t) NULL;

allocate_from_current:
	ret = zcache_mag_pop(per_cpu_cache->current);
	assert(ret != NULL);
	/* Catch any use-after-free that happened while the element was cached */
	zcache_canary_validate(zone, ret);

#if KASAN_ZALLOC
	/* Element is live again from KASan's point of view */
	kasan_poison_range((vm_offset_t)ret, zone->elem_size, ASAN_VALID);
#endif

	enable_preemption();
	return (vm_offset_t) ret;
}
403
404
405/*
406 * zcache_mag_init
407 *
408 * Description: initializes fields in a zcc_magazine struct
409 *
410 * Parameters: mag pointer to magazine to initialize
411 *
412 */
0a7de745
A
413void
414zcache_mag_init(struct zcc_magazine *mag, int count)
d9a64523
A
415{
416 mag->zcc_magazine_index = 0;
417 mag->zcc_magazine_capacity = count;
418}
419
420
421/*
422 * zcache_mag_fill
423 *
424 * Description: fills a magazine with as many elements as the zone can give
0a7de745 425 * without blocking to carve out more memory
d9a64523
A
426 *
427 * Parameters: zone zone from which to allocate
428 * mag pointer to magazine to fill
429 *
430 * Return: True if able to allocate elements, false is mag is still empty
431 */
0a7de745
A
432bool
433zcache_mag_fill(zone_t zone, struct zcc_magazine *mag)
d9a64523
A
434{
435 assert(mag->zcc_magazine_index == 0);
436 void* elem = NULL;
437 uint32_t i;
438 lock_zone(zone);
0a7de745 439 for (i = mag->zcc_magazine_index; i < mag->zcc_magazine_capacity; i++) {
d9a64523 440 elem = zalloc_attempt(zone);
0a7de745 441 if (elem) {
d9a64523
A
442 zcache_canary_add(zone, elem);
443 zcache_mag_push(mag, elem);
444#if KASAN_ZALLOC
445 kasan_poison_range((vm_offset_t)elem, zone->elem_size, ASAN_HEAP_FREED);
446#endif
447 } else {
448 break;
449 }
450 }
451 unlock_zone(zone);
0a7de745 452 if (i == 0) {
d9a64523
A
453 return FALSE;
454 }
455 return TRUE;
456}
457
458/*
459 * zcache_mag_drain
460 *
461 * Description: frees all elements in a magazine
462 *
463 * Parameters: zone zone to which elements will be freed
464 * mag pointer to magazine to empty
465 *
466 */
0a7de745
A
467void
468zcache_mag_drain(zone_t zone, struct zcc_magazine *mag)
d9a64523
A
469{
470 assert(mag->zcc_magazine_index == mag->zcc_magazine_capacity);
471 lock_zone(zone);
0a7de745 472 while (mag->zcc_magazine_index > 0) {
d9a64523
A
473 uint32_t index = --mag->zcc_magazine_index;
474 zcache_canary_validate(zone, mag->zcc_elements[index]);
0a7de745 475 zfree_direct(zone, (vm_offset_t)mag->zcc_elements[index]);
d9a64523
A
476 mag->zcc_elements[mag->zcc_magazine_index] = 0;
477 }
478 unlock_zone(zone);
479}
480
481/*
482 * zcache_mag_pop
483 *
484 * Description: removes last element from magazine in a stack pop fashion
485 * zcc_magazine_index represents the number of elements on the
486 * stack, so it the index of where to save the next element, when
487 * full, it will be 1 past the last index of the array
488 *
489 * Parameters: mag pointer to magazine from which to remove element
490 *
491 * Returns: pointer to element removed from magazine
492 *
493 * Precondition: must check that magazine is not empty before calling
494 */
0a7de745
A
495void *
496zcache_mag_pop(struct zcc_magazine *mag)
d9a64523 497{
0a7de745 498 void *elem;
d9a64523
A
499 assert(zcache_mag_has_elements(mag));
500 elem = mag->zcc_elements[--mag->zcc_magazine_index];
501 /* Ensure pointer to element cannot be accessed after we pop it */
502 mag->zcc_elements[mag->zcc_magazine_index] = NULL;
503 assert(elem != NULL);
504 return elem;
505}
506
507
508/*
509 * zcache_mag_push
510 *
511 * Description: adds element to magazine and increments zcc_magazine_index
512 * zcc_magazine_index represents the number of elements on the
513 * stack, so it the index of where to save the next element, when
514 * full, it will be 1 past the last index of the array
515 *
516 * Parameters: mag pointer to magazine from which to remove element
517 * elem pointer to element to add
518 *
519 * Precondition: must check that magazine is not full before calling
520 */
0a7de745
A
521void
522zcache_mag_push(struct zcc_magazine *mag, void *elem)
d9a64523
A
523{
524 assert(zcache_mag_has_space(mag));
0a7de745 525 mag->zcc_elements[mag->zcc_magazine_index++] = elem;
d9a64523
A
526}
527
528
529/*
530 * zcache_mag_has_space
531 *
532 * Description: checks if magazine still has capacity
533 *
534 * Parameters: mag pointer to magazine to check
535 *
536 * Returns: true if magazine is full
537 *
538 */
0a7de745
A
539bool
540zcache_mag_has_space(struct zcc_magazine *mag)
d9a64523 541{
0a7de745 542 return mag->zcc_magazine_index < mag->zcc_magazine_capacity;
d9a64523
A
543}
544
545
546/*
547 * zcache_mag_has_elements
548 *
549 * Description: checks if magazine is empty
550 *
551 * Parameters: mag pointer to magazine to check
552 *
553 * Returns: true if magazine has no elements
554 *
555 */
0a7de745
A
556bool
557zcache_mag_has_elements(struct zcc_magazine *mag)
d9a64523 558{
0a7de745 559 return mag->zcc_magazine_index > 0;
d9a64523
A
560}
561
562
/*
 * zcache_swap_magazines
 *
 * Description: Exchanges the magazines referenced by two pointer slots.
 *
 * Parameters:  a       pointer to first magazine pointer
 *              b       pointer to second magazine pointer
 */
void
zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b)
{
	struct zcc_magazine *saved = *b;

	*b = *a;
	*a = saved;
}
578
579
580/*
581 * zcache_mag_depot_swap_for_alloc
582 *
583 * Description: Swaps a full magazine into the current position
584 *
585 * Parameters: zcache pointer to the zone_cache to access the depot
586 * cache pointer to the current per-cpu cache
587 *
588 * Precondition: Check that the depot list has full elements
589 */
0a7de745
A
590void
591zcache_mag_depot_swap_for_alloc(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache)
d9a64523
A
592{
593 /* Loads a full magazine from which we can allocate */
594 assert(zcache_depot_available(zcache));
595 assert(zcache->zcc_depot_index > 0);
0a7de745 596 zcache->zcc_depot_index--;
d9a64523
A
597 zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]);
598}
599
600
601/*
602 * zcache_mag_depot_swap_for_free
603 *
604 * Description: Swaps an empty magazine into the current position
605 *
606 * Parameters: zcache pointer to the zone_cache to access the depot
607 * cache pointer to the current per-cpu cache
608 *
609 * Precondition: Check that the depot list has empty elements
610 */
0a7de745
A
611void
612zcache_mag_depot_swap_for_free(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache)
d9a64523
A
613{
614 /* Loads an empty magazine into which we can free */
615 assert(zcache_depot_available(zcache));
616 assert(zcache->zcc_depot_index < depot_element_count);
617 zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]);
0a7de745 618 zcache->zcc_depot_index++;
d9a64523
A
619}
620
621/*
622 * zcache_canary_add
623 *
0a7de745
A
624 * Description: Adds a canary to an element by putting zcache_canary at the first
625 * and last location of the element
d9a64523
A
626 *
627 * Parameters: zone zone for the element
0a7de745 628 * addr element address to add canary to
d9a64523
A
629 *
630 */
0a7de745
A
631void
632zcache_canary_add(zone_t zone, void *element)
d9a64523
A
633{
634 vm_offset_t *primary = (vm_offset_t *)element;
635 vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t));
636 *primary = *backup = (zcache_canary ^ (uintptr_t)element);
637}
638
639/*
640 * zcache_canary_validate
641 *
0a7de745
A
642 * Description: Validates an element of the zone cache to make sure it still contains the zone
643 * caching canary.
d9a64523
A
644 *
645 * Parameters: zone zone for the element
0a7de745 646 * addr element address to validate
d9a64523
A
647 *
648 */
0a7de745
A
649void
650zcache_canary_validate(zone_t zone, void *element)
d9a64523
A
651{
652 vm_offset_t *primary = (vm_offset_t *)element;
653 vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t));
654
655 vm_offset_t primary_value = (*primary ^ (uintptr_t)element);
656 if (primary_value != zcache_canary) {
657 panic("Zone cache element was used after free! Element %p was corrupted at beginning; Expected %p but found %p; canary %p",
0a7de745 658 element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*primary), (void *)zcache_canary);
d9a64523 659 }
0a7de745 660
d9a64523
A
661 vm_offset_t backup_value = (*backup ^ (uintptr_t)element);
662 if (backup_value != zcache_canary) {
663 panic("Zone cache element was used after free! Element %p was corrupted at end; Expected %p but found %p; canary %p",
0a7de745 664 element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*backup), (void *)zcache_canary);
d9a64523
A
665 }
666}