/*
 * Copyright (c) 2017-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <mach/mach_host.h>
#include <vm/vm_kern.h>
#include <kern/startup.h>
#include <kern/zalloc_internal.h>

/* Size of array in magazine determined by boot-arg or default */
TUNABLE(uint16_t, magazine_element_count, "zcc_magazine_element_count", 8);

/* Size of depot lists determined by boot-arg or default */
TUNABLE(uint16_t, depot_element_count, "zcc_depot_element_count", 8);

SECURITY_READ_ONLY_LATE(zone_t) magazine_zone;      /* zone to allocate zcc_magazine structs from */
SECURITY_READ_ONLY_LATE(uintptr_t) zcache_canary;   /* Canary used by the caching layer to prevent UaF attacks */

/*
 * The zcc_magazine is used as a stack to store cached zone elements. These
 * sets of elements can be moved around to perform bulk operations.
 */
struct zcc_magazine {
	uint32_t zcc_magazine_index;        /* Used as a stack pointer to access elements in the array */
	uint32_t zcc_magazine_capacity;     /* Number of pointers able to be stored in the zcc_elements array */
	vm_offset_t zcc_elements[0];        /* Array of pointers to objects */
};


/*
 * Each CPU will use one of these to store its elements
 */
struct zcc_per_cpu_cache {
	/* Magazine we always try to allocate from and free to first */
	struct zcc_magazine *current;
	/* Dedicated magazine for a quick reload and to prevent thrashing when we swap with the depot */
	struct zcc_magazine *previous;
	/* Zcache poisoning count */
	uint32_t zp_count;
#if ZALLOC_DETAILED_STATS
	uint64_t zcc_allocs;
	uint64_t zcc_frees;
#endif /* ZALLOC_DETAILED_STATS */
};


/*
 * This is the basic struct that takes care of caching and is included within
 * the zone.
 */
struct zcc_depot {
	/* marks the point in the array where empty magazines begin */
	int zcc_depot_index;

#if ZALLOC_DETAILED_STATS
	uint64_t zcc_swap;
	uint64_t zcc_fill;
	uint64_t zcc_drain;
	uint64_t zcc_fail;
	uint64_t zcc_gc;
#endif /* ZALLOC_DETAILED_STATS */

	/* Stores full and empty magazines in the depot layer */
	struct zcc_magazine *zcc_depot_list[0];
};

static bool zcache_mag_fill_locked(zone_t zone, struct zcc_magazine *mag);
static void zcache_mag_drain_locked(zone_t zone, struct zcc_magazine *mag);
static bool zcache_mag_has_space(struct zcc_magazine *mag);
static bool zcache_mag_has_elements(struct zcc_magazine *mag);
static void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b);
static void zcache_mag_depot_swap_for_alloc(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache);
static void zcache_mag_depot_swap_for_free(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache);
static void zcache_canary_add(zone_t zone, vm_offset_t addr);
#if ZALLOC_ENABLE_POISONING
static void zcache_validate_element(zone_t zone, vm_offset_t *addr, bool poison);
static void zcache_validate_and_clear_canary(zone_t zone, vm_offset_t *primary, vm_offset_t *backup);
#endif

/*
 * zcache_ready
 *
 * Returns whether or not the zone caches are ready to use
 *
 */
static bool
zcache_ready(void)
{
	return magazine_zone != NULL;
}

/*
 * zcache_bootstrap
 *
 * Initializes the zone to allocate magazines from and sets
 * magazine_element_count and depot_element_count from
 * boot-args or default values
 *
 */
__startup_func
static void
zcache_bootstrap(void)
{
	int magazine_size = sizeof(struct zcc_magazine) + magazine_element_count * sizeof(void *);
	zone_t magzone;

	/* Generate the canary value for zone caches */
	zcache_canary = (uintptr_t) early_random();

	magzone = zone_create("zcc_magazine_zone", magazine_size,
	    ZC_NOCACHING | ZC_ZFREE_CLEARMEM);

	/*
	 * This causes zcache_ready() to return true.
	 */
	os_atomic_store(&magazine_zone, magzone, compiler_acq_rel);

	/*
	 * Now that we are initialized, we can enable zone caching for zones that
	 * were made before zcache_bootstrap() was called.
	 *
	 * The system is still single threaded so we don't need to take the lock.
	 */
	zone_index_foreach(i) {
		if (zone_array[i].cpu_cache_enabled) {
			zcache_init(&zone_array[i]);
		}
	}
}
STARTUP(ZALLOC, STARTUP_RANK_FOURTH, zcache_bootstrap);

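/*
 * zcache_mag_alloc
 *
 * Allocates an empty magazine from magazine_zone and sets its capacity
 * to the boot-time magazine_element_count
 *
 */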
static struct zcc_magazine *
zcache_mag_alloc(void)
{
	struct zcc_magazine *mag = zalloc_flags(magazine_zone, Z_WAITOK);
	mag->zcc_magazine_capacity = magazine_element_count;
	return mag;
}


/*
 * zcache_init
 *
 * Initializes all parts of the per-cpu caches for a given zone
 *
 * Parameters:
 *	zone	pointer to zone on which to initialize caching
 *
 */
void
zcache_init(zone_t zone)
{
	struct zcc_per_cpu_cache *pcpu_caches;
	struct zcc_depot *depot;
	vm_size_t size;

	/*
	 * If zcache hasn't been initialized yet, remember our decision.
	 *
	 * zcache_init() will be called again by zcache_bootstrap(),
	 * while the system is still single threaded, to build the missing caches.
	 */
	if (!zcache_ready()) {
		zone->cpu_cache_enabled = true;
		return;
	}

	/* Allocate chunk of memory for all structs */
	size = sizeof(struct zcc_depot) + (depot_element_count * sizeof(void *));
	depot = zalloc_permanent(size, ZALIGN_PTR);

	size = sizeof(struct zcc_per_cpu_cache);
	pcpu_caches = zalloc_percpu_permanent(size, ZALIGN_PTR);

	/* Initialize a cache for every CPU */
	zpercpu_foreach(cache, pcpu_caches) {
		cache->current = zcache_mag_alloc();
		cache->previous = zcache_mag_alloc();
		cache->zp_count = zone_poison_count_init(zone);
	}

	/* Initialize empty magazines in the depot list */
	for (int i = 0; i < depot_element_count; i++) {
		depot->zcc_depot_list[i] = zcache_mag_alloc();
	}

	lock_zone(zone);
	if (zone->zcache.zcc_depot) {
		panic("allocating caches for zone %s twice", zone->z_name);
	}

	/* Make the initialization of the per-cpu magazines visible. */
	os_atomic_thread_fence(release);

	zone->zcache.zcc_depot = depot;
	zone->zcache.zcc_pcpu = pcpu_caches;
	zone->cpu_cache_enabled = true;
	unlock_zone(zone);
}

/*
 * zcache_drain_depot
 *
 * Frees all the full magazines from the depot layer to the zone allocator as part
 * of zone_gc(). The routine assumes that only one zone_gc() is in progress (zone_gc_lock
 * ensures that)
 *
 * Parameters:
 *	zone	pointer to zone for which the depot layer needs to be drained
 *
 * Returns: None
 *
 */
void
zcache_drain_depot(zone_t zone)
{
	struct zcc_depot *depot;
	int drain_depot_index = 0;

	lock_zone(zone);
	depot = zone->zcache.zcc_depot;
	drain_depot_index = depot->zcc_depot_index;
	for (int i = 0; i < drain_depot_index; i++) {
		zcache_mag_drain_locked(zone, depot->zcc_depot_list[i]);
	}
#if ZALLOC_DETAILED_STATS
	depot->zcc_gc += drain_depot_index;
#endif /* ZALLOC_DETAILED_STATS */
	depot->zcc_depot_index = 0;
	unlock_zone(zone);
}

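/*
 * zcache_free_to_cpu_cache_slow
 *
 * Slow path of zcache_free_to_cpu_cache(), called with both per-cpu magazines
 * full. Swaps in an empty magazine from the depot when one is available,
 * otherwise drains the current magazine back to the zone under the zone lock.
 *
 * Parameters:
 *	zone		zone the element is being freed to
 *	per_cpu_cache	this CPU's cache
 */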
__attribute__((noinline))
static void
zcache_free_to_cpu_cache_slow(zone_t zone, struct zcc_per_cpu_cache *per_cpu_cache)
{
	struct zcc_depot *depot;

	lock_zone(zone);
	depot = zone->zcache.zcc_depot;
	if (depot->zcc_depot_index < depot_element_count) {
		/* If able, rotate in a new empty magazine from the depot and retry */
		zcache_mag_depot_swap_for_free(depot, per_cpu_cache);
	} else {
		/* Free an entire magazine of elements */
		zcache_mag_drain_locked(zone, per_cpu_cache->current);
#if ZALLOC_DETAILED_STATS
		depot->zcc_drain++;
#endif /* ZALLOC_DETAILED_STATS */
	}
	unlock_zone(zone);
}


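/*
 * zcache_free_to_cpu_cache
 *
 * Frees an element to this CPU's cache: the element is cleared or poisoned,
 * the canary is added when it is not poisoned, and the element is pushed onto
 * the current magazine (swapping or draining magazines as needed).
 *
 * Parameters:
 *	zone	zone the element belongs to
 *	zstats	per-cpu zone statistics to update
 *	addr	address of the element being freed
 */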
void
zcache_free_to_cpu_cache(zone_t zone, zone_stats_t zstats, vm_offset_t addr)
{
	struct zcc_per_cpu_cache *per_cpu_cache;
	vm_offset_t elem = addr;
	int cpu;

	zone_allocated_element_validate(zone, elem);

	/*
	 * This is racy but we don't need zp_count to be accurate.
	 * This allows us to do the poisoning with preemption enabled.
	 */
	per_cpu_cache = zpercpu_get(zone->zcache.zcc_pcpu);
	if (zfree_clear_or_poison(zone, &per_cpu_cache->zp_count, elem)) {
		addr |= ZALLOC_ELEMENT_NEEDS_VALIDATION;
	} else {
		zcache_canary_add(zone, elem);
	}

#if KASAN_ZALLOC
	kasan_poison_range(elem, zone_elem_size(zone), ASAN_HEAP_FREED);
#endif

	disable_preemption();
	cpu = cpu_number();
	per_cpu_cache = zpercpu_get_cpu(zone->zcache.zcc_pcpu, cpu);

	if (zcache_mag_has_space(per_cpu_cache->current)) {
		/* If able, free into current magazine */
	} else if (zcache_mag_has_space(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
	} else {
		zcache_free_to_cpu_cache_slow(zone, per_cpu_cache);
	}

	struct zcc_magazine *mag = per_cpu_cache->current;
	mag->zcc_elements[mag->zcc_magazine_index++] = addr;
	zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += zone_elem_size(zone);
#if ZALLOC_DETAILED_STATS
	per_cpu_cache->zcc_frees++;
#endif /* ZALLOC_DETAILED_STATS */

	enable_preemption();
}

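/*
 * zcache_alloc_from_cpu_cache_slow
 *
 * Slow path of zcache_alloc_from_cpu_cache(), called with both per-cpu
 * magazines empty. Swaps in a full magazine from the depot when one is
 * available, otherwise tries to refill the current magazine from the zone
 * under the zone lock.
 *
 * Parameters:
 *	zone		zone to allocate from
 *	per_cpu_cache	this CPU's cache
 *
 * Returns: true if the current magazine now has elements, false if the caller
 *	should fall back to the regular zalloc path (preemption is re-enabled
 *	in that case)
 */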
__attribute__((noinline))
static bool
zcache_alloc_from_cpu_cache_slow(zone_t zone, struct zcc_per_cpu_cache *per_cpu_cache)
{
	struct zcc_depot *depot;

	lock_zone(zone);
	depot = zone->zcache.zcc_depot;
	if (depot->zcc_depot_index > 0) {
		/* If able, rotate in a full magazine from the depot */
		zcache_mag_depot_swap_for_alloc(depot, per_cpu_cache);
	} else if (zcache_mag_fill_locked(zone, per_cpu_cache->current)) {
#if ZALLOC_DETAILED_STATS
		depot->zcc_fill++;
#endif /* ZALLOC_DETAILED_STATS */
	} else {
#if ZALLOC_DETAILED_STATS
		depot->zcc_fail++;
#endif /* ZALLOC_DETAILED_STATS */
		/* If unable to allocate from cache return NULL and fall through to zalloc */
		unlock_zone(zone);
		enable_preemption();
		return false;
	}
	unlock_zone(zone);

	return true;
}

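/*
 * zcache_alloc_from_cpu_cache
 *
 * Allocates an element from this CPU's cache by popping it off the current
 * magazine (swapping or refilling magazines as needed), then validates the
 * canary or poisoning before handing it out.
 *
 * Parameters:
 *	zone	zone to allocate from
 *	zstats	per-cpu zone statistics to update
 *	waste	bytes wasted by this allocation (detailed stats only)
 *
 * Returns: address of the allocated element, or 0 if the cache is empty and
 *	the caller should fall back to zalloc
 */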
vm_offset_t
zcache_alloc_from_cpu_cache(zone_t zone, zone_stats_t zstats, vm_size_t waste)
{
	struct zcc_per_cpu_cache *per_cpu_cache;
	int cpu;

	disable_preemption();
	cpu = cpu_number();
	per_cpu_cache = zpercpu_get_cpu(zone->zcache.zcc_pcpu, cpu);

	if (zcache_mag_has_elements(per_cpu_cache->current)) {
		/* If able, allocate from current magazine */
	} else if (zcache_mag_has_elements(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
	} else if (!zcache_alloc_from_cpu_cache_slow(zone, per_cpu_cache)) {
		return (vm_offset_t)NULL;
	}

	struct zcc_magazine *mag = per_cpu_cache->current;
	vm_offset_t elem_size = zone_elem_size(zone);
	uint32_t index = --mag->zcc_magazine_index;
	vm_offset_t addr = mag->zcc_elements[index];
	mag->zcc_elements[index] = 0;
	zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += elem_size;
#if ZALLOC_DETAILED_STATS
	if (waste) {
		zpercpu_get_cpu(zstats, cpu)->zs_mem_wasted += waste;
	}
	per_cpu_cache->zcc_allocs++;
#else
	(void)waste;
#endif /* ZALLOC_DETAILED_STATS */

	enable_preemption();

#if ZALLOC_ENABLE_POISONING
	bool validate = addr & ZALLOC_ELEMENT_NEEDS_VALIDATION;
#endif /* ZALLOC_ENABLE_POISONING */

	addr &= ~ZALLOC_ELEMENT_NEEDS_VALIDATION;

#if KASAN_ZALLOC
	kasan_poison_range(addr, elem_size, ASAN_VALID);
#endif
#if ZALLOC_ENABLE_POISONING
	if (!validate) {
		vm_offset_t backup = addr + elem_size - sizeof(vm_offset_t);
		zcache_validate_and_clear_canary(zone, (vm_offset_t *)addr,
		    (vm_offset_t *)backup);
	}
	zalloc_validate_element(zone, addr, elem_size, validate);
#endif /* ZALLOC_ENABLE_POISONING */

	return addr;
}


/*
 * zcache_mag_fill_locked
 *
 * Fills a magazine with as many elements as the zone can give
 * without blocking to carve out more memory
 *
 * Parameters:
 *	zone	zone from which to allocate
 *	mag	pointer to magazine to fill
 *
 * Return: true if able to allocate elements, false if the magazine is still empty
 */
static bool
zcache_mag_fill_locked(zone_t zone, struct zcc_magazine *mag)
{
	uint32_t i = mag->zcc_magazine_index;
	uint32_t end = mag->zcc_magazine_capacity;
	vm_offset_t elem, addr;

	while (i < end && zone->countfree) {
		addr = zalloc_direct_locked(zone, Z_NOWAIT, 0);
		elem = addr & ~ZALLOC_ELEMENT_NEEDS_VALIDATION;
		if (addr & ZALLOC_ELEMENT_NEEDS_VALIDATION) {
			zone_clear_freelist_pointers(zone, elem);
		} else {
			zcache_canary_add(zone, elem);
		}
#if KASAN_ZALLOC
		kasan_poison_range(elem, zone_elem_size(zone), ASAN_HEAP_FREED);
#endif
		mag->zcc_elements[i++] = addr;
	}

	mag->zcc_magazine_index = i;

	return i != 0;
}

/*
 * zcache_mag_drain_locked
 *
 * Frees all elements in a magazine
 *
 * Parameters:
 *	zone	zone to which elements will be freed
 *	mag	pointer to magazine to empty
 *
 */
static void
zcache_mag_drain_locked(zone_t zone, struct zcc_magazine *mag)
{
	vm_offset_t elem, addr;
	bool poison;

	for (uint32_t i = 0, end = mag->zcc_magazine_index; i < end; i++) {
		addr = mag->zcc_elements[i];
		poison = addr & ZALLOC_ELEMENT_NEEDS_VALIDATION;
		elem = addr & ~ZALLOC_ELEMENT_NEEDS_VALIDATION;

#if ZALLOC_ENABLE_POISONING
		zcache_validate_element(zone, (vm_offset_t *)elem, poison);
#endif /* ZALLOC_ENABLE_POISONING */
		zfree_direct_locked(zone, elem, poison);
		mag->zcc_elements[i] = 0;
	}
	mag->zcc_magazine_index = 0;
}


/*
 * zcache_mag_has_space
 *
 * Checks if magazine still has capacity
 *
 * Parameters:
 *	mag	pointer to magazine to check
 *
 * Returns: true if the magazine still has room for more elements
 *
 */
static bool
zcache_mag_has_space(struct zcc_magazine *mag)
{
	return mag->zcc_magazine_index < mag->zcc_magazine_capacity;
}


/*
 * zcache_mag_has_elements
 *
 * Checks if magazine is empty
 *
 * Parameters:
 *	mag	pointer to magazine to check
 *
 * Returns: true if the magazine contains elements
 *
 */
static bool
zcache_mag_has_elements(struct zcc_magazine *mag)
{
	return mag->zcc_magazine_index > 0;
}


/*
 * zcache_swap_magazines
 *
 * Swaps two magazine pointers
 *
 * Parameters:
 *	a	pointer to first magazine pointer
 *	b	pointer to second magazine pointer
 */
static void
zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b)
{
	struct zcc_magazine *temp = *a;
	*a = *b;
	*b = temp;
}


/*
 * zcache_mag_depot_swap_for_alloc
 *
 * Swaps a full magazine into the current position
 *
 * Parameters:
 *	depot	pointer to the depot
 *	cache	pointer to the current per-cpu cache
 *
 * Precondition: the depot list must contain at least one full magazine
 */
static void
zcache_mag_depot_swap_for_alloc(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache)
{
	/* Loads a full magazine from which we can allocate */
	assert(depot->zcc_depot_index > 0);
	depot->zcc_depot_index--;
#if ZALLOC_DETAILED_STATS
	depot->zcc_swap++;
#endif /* ZALLOC_DETAILED_STATS */
	zcache_swap_magazines(&cache->current, &depot->zcc_depot_list[depot->zcc_depot_index]);
}


/*
 * zcache_mag_depot_swap_for_free
 *
 * Swaps an empty magazine into the current position
 *
 * Parameters:
 *	depot	pointer to the depot
 *	cache	pointer to the current per-cpu cache
 *
 * Precondition: the depot list must still contain an empty magazine
 */
static void
zcache_mag_depot_swap_for_free(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache)
{
	/* Loads an empty magazine into which we can free */
	assert(depot->zcc_depot_index < depot_element_count);
	zcache_swap_magazines(&cache->current, &depot->zcc_depot_list[depot->zcc_depot_index]);
#if ZALLOC_DETAILED_STATS
	depot->zcc_swap++;
#endif /* ZALLOC_DETAILED_STATS */
	depot->zcc_depot_index++;
}

/*
 * zcache_canary_add
 *
 * Adds a canary to an element by putting zcache_canary at the first
 * and last location of the element
 *
 * Parameters:
 *	zone	zone for the element
 *	element	element address to add the canary to
 */
static void
zcache_canary_add(zone_t zone, vm_offset_t element)
{
#if ZALLOC_ENABLE_POISONING
	vm_offset_t *primary = (vm_offset_t *)element;
	vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary +
	    zone_elem_size(zone) - sizeof(vm_offset_t));
	*primary = *backup = (zcache_canary ^ (uintptr_t)element);
#else
#pragma unused(zone, element)
#endif
}

#if ZALLOC_ENABLE_POISONING
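/*
 * zcache_validation_panic
 *
 * Panics with a use-after-free report when a cached element fails canary,
 * clear-memory, or poison validation, identifying whether the front or the
 * back of the element was corrupted
 *
 * Parameters:
 *	zone		zone the element belongs to
 *	primary		addr of the front of the element
 *	backup		addr of the back of the element
 *	permutation	expected value (zcache_canary, 0, or ZONE_POISON)
 */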
__abortlike static void
zcache_validation_panic(zone_t zone, vm_offset_t *primary, vm_offset_t *backup,
    vm_offset_t permutation)
{
	vm_offset_t primary_value = 0;
	vm_offset_t backup_value = 0;

	if (permutation == zcache_canary) {
		primary_value = *primary ^ (vm_offset_t)primary;
		backup_value = *backup ^ (vm_offset_t)primary;
		permutation = permutation ^ (vm_offset_t)primary;
	} else {
		primary_value = *primary;
		backup_value = *backup;
	}
	if (primary_value != permutation) {
		panic("Zone cache element was used after free! Element %p was corrupted at "
		    "beginning; Expected 0x%lx but found 0x%lx; canary 0x%lx; zone %p (%s%s)",
		    primary, (uintptr_t) permutation, (uintptr_t) *primary, zcache_canary, zone,
		    zone_heap_name(zone), zone->z_name);
	} else {
		panic("Zone cache element was used after free! Element %p was corrupted at end; "
		    "Expected 0x%lx but found 0x%lx; canary 0x%lx; zone %p (%s%s)",
		    primary, (uintptr_t) permutation, (uintptr_t) *backup, zcache_canary, zone,
		    zone_heap_name(zone), zone->z_name);
	}
}

/*
 * zcache_validate_and_clear_canary
 *
 * Validates an element of the zone cache to make sure it still contains the zone
 * caching canary and clears it.
 *
 * Parameters:
 *	zone	zone for the element
 *	primary	addr of canary placed in front
 *	backup	addr of canary placed at the back
 */
static void
zcache_validate_and_clear_canary(zone_t zone, vm_offset_t *primary, vm_offset_t *backup)
{
	vm_offset_t primary_value = (*primary ^ (uintptr_t)primary);
	vm_offset_t backup_value = (*backup ^ (uintptr_t)primary);

	if (primary_value == zcache_canary && backup_value == zcache_canary) {
		*primary = *backup = ZONE_POISON;
	} else {
		zcache_validation_panic(zone, primary, backup, zcache_canary);
	}
}

/*
 * zcache_validate_element
 *
 * Validates the first and last pointer size of the element to ensure
 * that they haven't been altered. This function is used when an
 * element moves from cache to zone, therefore only validating the
 * first and last pointer size (location of future freelist pointers).
 *
 * Parameters:
 *	zone	zone for the element
 *	element	addr of element to validate
 *	poison	has the element been poisoned
 */
static void
zcache_validate_element(zone_t zone, vm_offset_t *element, bool poison)
{
	vm_offset_t *primary = (vm_offset_t *)element;
	vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary +
	    zone_elem_size(zone) - sizeof(vm_offset_t));

	if (zone->zfree_clear_mem) {
		if (*primary == 0 && *backup == 0) {
			return;
		} else {
			zcache_validation_panic(zone, primary, backup, 0);
		}
	}

	if (__probable(!poison)) {
		zcache_validate_and_clear_canary(zone, primary, backup);
	} else {
		if (*primary == ZONE_POISON && *backup == ZONE_POISON) {
			return;
		} else {
			zcache_validation_panic(zone, primary, backup, ZONE_POISON);
		}
	}
}
#endif /* ZALLOC_ENABLE_POISONING */