/*
 * Copyright (c) 2017-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <mach/mach_host.h>
#include <vm/vm_kern.h>
#include <kern/startup.h>
#include <kern/zalloc_internal.h>
/* Size of array in magazine determined by boot-arg or default */
TUNABLE(uint16_t, magazine_element_count, "zcc_magazine_element_count", 8);

/* Size of depot lists determined by boot-arg or default */
TUNABLE(uint16_t, depot_element_count, "zcc_depot_element_count", 8);
SECURITY_READ_ONLY_LATE(zone_t) magazine_zone;    /* zone to allocate zcc_magazine structs from */
SECURITY_READ_ONLY_LATE(uintptr_t) zcache_canary; /* Canary used for the caching layer to prevent UaF attacks */
/*
 * The zcc_magazine is used as a stack to store cached zone elements. These
 * sets of elements can be moved around to perform bulk operations.
 */
struct zcc_magazine {
	uint32_t zcc_magazine_index;       /* Used as a stack pointer to access elements in the array */
	uint32_t zcc_magazine_capacity;    /* Number of pointers able to be stored in the zcc_elements array */
	vm_offset_t zcc_elements[0];       /* Array of pointers to objects */
};
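/*
 * Sketch of the magazine stack discipline (illustrative): a free pushes the
 * element address and bumps the index, an allocation pops it back off:
 *
 *     mag->zcc_elements[mag->zcc_magazine_index++] = addr;    // free path
 *     addr = mag->zcc_elements[--mag->zcc_magazine_index];    // alloc path
 *
 * A magazine is full when zcc_magazine_index == zcc_magazine_capacity and
 * empty when it is 0; see zcache_mag_has_space()/zcache_mag_has_elements().
 */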
/*
 * Each CPU will use one of these to store its elements
 */
struct zcc_per_cpu_cache {
	/* Magazine from which we will always try to allocate from and free to first */
	struct zcc_magazine *current;
	/* Dedicated magazine for a quick reload and to prevent thrashing when we swap with the depot */
	struct zcc_magazine *previous;
	/* Zcache poisoning count */
	uint32_t zp_count;
#if ZALLOC_DETAILED_STATS
	uint64_t zcc_allocs;
	uint64_t zcc_frees;
#endif /* ZALLOC_DETAILED_STATS */
};
/*
 * This is the basic struct to take care of caching and is included within
 * the zone.
 */
struct zcc_depot {
	/* marks the point in the array where empty magazines begin */
	int zcc_depot_index;

#if ZALLOC_DETAILED_STATS
	uint64_t zcc_gc;
#endif /* ZALLOC_DETAILED_STATS */

	/* Stores full and empty magazines in the depot layer */
	struct zcc_magazine *zcc_depot_list[0];
};
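/*
 * Layout sketch (illustrative): every zone with caching enabled owns one
 * zcc_per_cpu_cache per CPU plus a single shared zcc_depot. The fast paths
 * touch only the local CPU's current/previous magazines with preemption
 * disabled; the slow paths take the zone lock and exchange whole magazines
 * with the depot, so in the steady state the zone lock is only taken roughly
 * once per magazine's worth of allocations or frees.
 */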
static bool zcache_mag_fill_locked(zone_t zone, struct zcc_magazine *mag);
static void zcache_mag_drain_locked(zone_t zone, struct zcc_magazine *mag);
static bool zcache_mag_has_space(struct zcc_magazine *mag);
static bool zcache_mag_has_elements(struct zcc_magazine *mag);
static void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b);
static void zcache_mag_depot_swap_for_alloc(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache);
static void zcache_mag_depot_swap_for_free(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache);
static void zcache_canary_add(zone_t zone, vm_offset_t addr);
#if ZALLOC_ENABLE_POISONING
static void zcache_validate_element(zone_t zone, vm_offset_t *addr, bool poison);
static void zcache_validate_and_clear_canary(zone_t zone, vm_offset_t *primary, vm_offset_t *backup);
#endif /* ZALLOC_ENABLE_POISONING */
/*
 * zcache_ready
 *
 * Returns whether or not the zone caches are ready to use
 */
static bool
zcache_ready(void)
{
	return magazine_zone != NULL;
}
/*
 * zcache_bootstrap
 *
 * Initializes zone to allocate magazines from and sets
 * magazine_element_count and depot_element_count from
 * boot-args or default values
 */
static void
zcache_bootstrap(void)
{
	int magazine_size = sizeof(struct zcc_magazine) +
	    magazine_element_count * sizeof(void *);
	zone_t magzone;

	/* Generate the canary value for zone caches */
	zcache_canary = (uintptr_t) early_random();

	magzone = zone_create("zcc_magazine_zone", magazine_size,
	    ZC_NOCACHING | ZC_ZFREE_CLEARMEM);

	/*
	 * This causes zcache_ready() to return true.
	 */
	os_atomic_store(&magazine_zone, magzone, compiler_acq_rel);

	/*
	 * Now that we are initialized, we can enable zone caching for zones that
	 * were made before zcache_bootstrap() was called.
	 *
	 * The system is still single threaded so we don't need to take the lock.
	 */
	zone_index_foreach(i) {
		if (zone_array[i].cpu_cache_enabled) {
			zcache_init(&zone_array[i]);
		}
	}
}
STARTUP(ZALLOC, STARTUP_RANK_FOURTH, zcache_bootstrap);
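/*
 * Worked example (illustrative, 64-bit kernel): with the default
 * zcc_magazine_element_count of 8, magazine_size is
 * sizeof(struct zcc_magazine) + 8 * sizeof(void *) = 8 + 64 = 72 bytes,
 * since the flexible zcc_elements[] array contributes no size of its own.
 */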
static struct zcc_magazine *
zcache_mag_alloc(void)
{
	struct zcc_magazine *mag = zalloc_flags(magazine_zone, Z_WAITOK);
	mag->zcc_magazine_capacity = magazine_element_count;
	return mag;
}
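/*
 * Note: magazines themselves come from magazine_zone, which is created with
 * ZC_NOCACHING above, presumably so that allocating a new magazine can never
 * recurse into this caching layer; ZC_ZFREE_CLEARMEM additionally clears
 * stale element pointers out of freed magazines.
 */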
/*
 * zcache_init
 *
 * Initializes all parts of the per-cpu caches for a given zone
 *
 * Parameters:
 * zone    pointer to zone on which to initialize caching
 */
void
zcache_init(zone_t zone)
{
	struct zcc_per_cpu_cache *pcpu_caches;
	struct zcc_depot         *depot;
	vm_size_t                 size;

	/*
	 * If zcache hasn't been initialized yet, remember our decision,
	 *
	 * zcache_init() will be called again by zcache_bootstrap(),
	 * while the system is still single threaded, to build the missing caches.
	 */
	if (!zcache_ready()) {
		zone->cpu_cache_enabled = true;
		return;
	}

	/* Allocate chunk of memory for all structs */
	size = sizeof(struct zcc_depot) + (depot_element_count * sizeof(void *));
	depot = zalloc_permanent(size, ZALIGN_PTR);

	size = sizeof(struct zcc_per_cpu_cache);
	pcpu_caches = zalloc_percpu_permanent(size, ZALIGN_PTR);

	/* Initialize a cache for every CPU */
	zpercpu_foreach(cache, pcpu_caches) {
		cache->current = zcache_mag_alloc();
		cache->previous = zcache_mag_alloc();
		cache->zp_count = zone_poison_count_init(zone);
	}

	/* Initialize empty magazines in the depot list */
	for (int i = 0; i < depot_element_count; i++) {
		depot->zcc_depot_list[i] = zcache_mag_alloc();
	}

	lock_zone(zone);
	if (zone->zcache.zcc_depot) {
		panic("allocating caches for zone %s twice", zone->z_name);
	}

	/* Make the initialization of the per-cpu magazines visible. */
	os_atomic_thread_fence(release);

	zone->zcache.zcc_depot = depot;
	zone->zcache.zcc_pcpu = pcpu_caches;
	zone->cpu_cache_enabled = true;
	unlock_zone(zone);
}
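/*
 * Usage sketch (illustrative; the exact flag name depends on the zalloc
 * version): a zone normally opts into this layer at creation time with the
 * zone_create() flag that sets cpu_cache_enabled, e.g.
 *
 *     zone_t z = zone_create("my.objects", sizeof(struct my_object), ZC_CACHING);
 *
 * If the zone is created before zcache_bootstrap() has run, the flag is only
 * recorded here and the caches are built later by the startup callback.
 */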
/*
 * zcache_drain_depot
 *
 * Frees all the full magazines from the depot layer to the zone allocator as part
 * of zone_gc(). The routine assumes that only one zone_gc() is in progress (zone_gc_lock
 * ensures that).
 *
 * Parameters:
 * zone    pointer to zone for which the depot layer needs to be drained
 */
void
zcache_drain_depot(zone_t zone)
{
	struct zcc_depot *depot;
	int drain_depot_index = 0;

	lock_zone(zone);
	depot = zone->zcache.zcc_depot;
	drain_depot_index = depot->zcc_depot_index;
	for (int i = 0; i < drain_depot_index; i++) {
		zcache_mag_drain_locked(zone, depot->zcc_depot_list[i]);
	}
#if ZALLOC_DETAILED_STATS
	depot->zcc_gc += drain_depot_index;
#endif /* ZALLOC_DETAILED_STATS */
	depot->zcc_depot_index = 0;
	unlock_zone(zone);
}
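/*
 * Note: only the depot is touched here; the per-CPU current/previous
 * magazines are intentionally left alone, so zone_gc() reclaims the memory
 * parked in full depot magazines without disturbing the fast paths.
 */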
__attribute__((noinline))
static void
zcache_free_to_cpu_cache_slow(zone_t zone, struct zcc_per_cpu_cache *per_cpu_cache)
{
	struct zcc_depot *depot;

	lock_zone(zone);
	depot = zone->zcache.zcc_depot;
	if (depot->zcc_depot_index < depot_element_count) {
		/* If able, rotate in a new empty magazine from the depot and retry */
		zcache_mag_depot_swap_for_free(depot, per_cpu_cache);
	} else {
		/* Free an entire magazine of elements */
		zcache_mag_drain_locked(zone, per_cpu_cache->current);
#if ZALLOC_DETAILED_STATS
#endif /* ZALLOC_DETAILED_STATS */
	}
	unlock_zone(zone);
}
void
zcache_free_to_cpu_cache(zone_t zone, zone_stats_t zstats, vm_offset_t addr)
{
	struct zcc_per_cpu_cache *per_cpu_cache;
	vm_offset_t elem = addr;
	int cpu;

	zone_allocated_element_validate(zone, elem);

	/*
	 * This is racy but we don't need zp_count to be accurate.
	 * This allows us to do the poisoning with preemption enabled.
	 */
	per_cpu_cache = zpercpu_get(zone->zcache.zcc_pcpu);
	if (zfree_clear_or_poison(zone, &per_cpu_cache->zp_count, elem)) {
		addr |= ZALLOC_ELEMENT_NEEDS_VALIDATION;
	} else {
		zcache_canary_add(zone, elem);
	}

	kasan_poison_range(elem, zone_elem_size(zone), ASAN_HEAP_FREED);

	disable_preemption();
	cpu = cpu_number();
	per_cpu_cache = zpercpu_get_cpu(zone->zcache.zcc_pcpu, cpu);

	if (zcache_mag_has_space(per_cpu_cache->current)) {
		/* If able, free into current magazine */
	} else if (zcache_mag_has_space(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
	} else {
		zcache_free_to_cpu_cache_slow(zone, per_cpu_cache);
	}

	struct zcc_magazine *mag = per_cpu_cache->current;
	mag->zcc_elements[mag->zcc_magazine_index++] = addr;
	zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += zone_elem_size(zone);
#if ZALLOC_DETAILED_STATS
	per_cpu_cache->zcc_frees++;
#endif /* ZALLOC_DETAILED_STATS */

	enable_preemption();
}
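/*
 * Flow sketch for the free path above (illustrative): the common case stores
 * the (possibly tagged) address into the current magazine without taking the
 * zone lock. Only when both per-CPU magazines are full does
 * zcache_free_to_cpu_cache_slow() take the zone lock, to either park the full
 * magazine in the depot or drain it back to the zone, guaranteeing that
 * per_cpu_cache->current has space again before the store above.
 */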
__attribute__((noinline))
static bool
zcache_alloc_from_cpu_cache_slow(zone_t zone, struct zcc_per_cpu_cache *per_cpu_cache)
{
	struct zcc_depot *depot;

	lock_zone(zone);
	depot = zone->zcache.zcc_depot;
	if (depot->zcc_depot_index > 0) {
		/* If able, rotate in a full magazine from the depot */
		zcache_mag_depot_swap_for_alloc(depot, per_cpu_cache);
	} else if (zcache_mag_fill_locked(zone, per_cpu_cache->current)) {
#if ZALLOC_DETAILED_STATS
#endif /* ZALLOC_DETAILED_STATS */
	} else {
#if ZALLOC_DETAILED_STATS
#endif /* ZALLOC_DETAILED_STATS */
		/* If unable to allocate from cache return NULL and fall through to zalloc */
		unlock_zone(zone);
		enable_preemption();
		return false;
	}
	unlock_zone(zone);

	return true;
}
vm_offset_t
zcache_alloc_from_cpu_cache(zone_t zone, zone_stats_t zstats, vm_size_t waste)
{
	struct zcc_per_cpu_cache *per_cpu_cache;
	int cpu;

	disable_preemption();
	cpu = cpu_number();
	per_cpu_cache = zpercpu_get_cpu(zone->zcache.zcc_pcpu, cpu);

	if (zcache_mag_has_elements(per_cpu_cache->current)) {
		/* If able, allocate from current magazine */
	} else if (zcache_mag_has_elements(per_cpu_cache->previous)) {
		/* If able, swap current and previous magazine and retry */
		zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current);
	} else if (!zcache_alloc_from_cpu_cache_slow(zone, per_cpu_cache)) {
		return (vm_offset_t)NULL;
	}

	struct zcc_magazine *mag = per_cpu_cache->current;
	vm_offset_t elem_size = zone_elem_size(zone);
	uint32_t index = --mag->zcc_magazine_index;
	vm_offset_t addr = mag->zcc_elements[index];

	mag->zcc_elements[index] = 0;
	zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += elem_size;
#if ZALLOC_DETAILED_STATS
	zpercpu_get_cpu(zstats, cpu)->zs_mem_wasted += waste;
	per_cpu_cache->zcc_allocs++;
#else
	(void)waste;
#endif /* ZALLOC_DETAILED_STATS */

	enable_preemption();

#if ZALLOC_ENABLE_POISONING
	bool validate = addr & ZALLOC_ELEMENT_NEEDS_VALIDATION;
#endif /* ZALLOC_ENABLE_POISONING */

	addr &= ~ZALLOC_ELEMENT_NEEDS_VALIDATION;

	kasan_poison_range(addr, elem_size, ASAN_VALID);

#if ZALLOC_ENABLE_POISONING
	if (!validate) {
		vm_offset_t backup = addr + elem_size - sizeof(vm_offset_t);
		zcache_validate_and_clear_canary(zone, (vm_offset_t *)addr,
		    (vm_offset_t *)backup);
	}
	zalloc_validate_element(zone, addr, elem_size, validate);
#endif /* ZALLOC_ENABLE_POISONING */

	return addr;
}
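/*
 * Note on the address tagging used above (illustrative summary): cached
 * entries reuse a spare bit of the element address,
 * ZALLOC_ELEMENT_NEEDS_VALIDATION, to remember whether the element was
 * poisoned when it entered the cache. The bit is stripped with
 * addr &= ~ZALLOC_ELEMENT_NEEDS_VALIDATION before the address is handed out,
 * and it selects between canary validation (untagged) and poison validation
 * (tagged) on the way out.
 */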
/*
 * zcache_mag_fill_locked
 *
 * Fills a magazine with as many elements as the zone can give
 * without blocking to carve out more memory
 *
 * Parameters:
 * zone   zone from which to allocate
 * mag    pointer to magazine to fill
 *
 * Return: True if able to allocate elements, false if mag is still empty
 */
static bool
zcache_mag_fill_locked(zone_t zone, struct zcc_magazine *mag)
{
	uint32_t i = mag->zcc_magazine_index;
	uint32_t end = mag->zcc_magazine_capacity;
	vm_offset_t elem, addr;

	while (i < end && zone->countfree) {
		addr = zalloc_direct_locked(zone, Z_NOWAIT, 0);
		elem = addr & ~ZALLOC_ELEMENT_NEEDS_VALIDATION;
		if (addr & ZALLOC_ELEMENT_NEEDS_VALIDATION) {
			zone_clear_freelist_pointers(zone, elem);
		} else {
			zcache_canary_add(zone, elem);
		}
		kasan_poison_range(elem, zone_elem_size(zone), ASAN_HEAP_FREED);
		mag->zcc_elements[i++] = addr;
	}

	mag->zcc_magazine_index = i;

	return i != 0;
}
/*
 * zcache_mag_drain_locked
 *
 * Frees all elements in a magazine
 *
 * Parameters:
 * zone   zone to which elements will be freed
 * mag    pointer to magazine to empty
 */
static void
zcache_mag_drain_locked(zone_t zone, struct zcc_magazine *mag)
{
	vm_offset_t elem, addr;
	bool poison;

	for (uint32_t i = 0, end = mag->zcc_magazine_index; i < end; i++) {
		addr   = mag->zcc_elements[i];
		poison = addr & ZALLOC_ELEMENT_NEEDS_VALIDATION;
		elem   = addr & ~ZALLOC_ELEMENT_NEEDS_VALIDATION;

#if ZALLOC_ENABLE_POISONING
		zcache_validate_element(zone, (vm_offset_t *)elem, poison);
#endif /* ZALLOC_ENABLE_POISONING */
		zfree_direct_locked(zone, elem, poison);
		mag->zcc_elements[i] = 0;
	}
	mag->zcc_magazine_index = 0;
}
/*
 * zcache_mag_has_space
 *
 * Checks if magazine still has capacity
 *
 * Parameters:
 * mag    pointer to magazine to check
 *
 * Returns: true if magazine still has space for more elements
 */
static bool
zcache_mag_has_space(struct zcc_magazine *mag)
{
	return mag->zcc_magazine_index < mag->zcc_magazine_capacity;
}
/*
 * zcache_mag_has_elements
 *
 * Checks if magazine is empty
 *
 * Parameters:
 * mag    pointer to magazine to check
 *
 * Returns: true if magazine has elements, false if it is empty
 */
static bool
zcache_mag_has_elements(struct zcc_magazine *mag)
{
	return mag->zcc_magazine_index > 0;
}
/*
 * zcache_swap_magazines
 *
 * Swaps two magazine pointers
 *
 * Parameters:
 * a    pointer to first pointer
 * b    pointer to second pointer
 */
static void
zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b)
{
	struct zcc_magazine *temp = *a;
	*a = *b;
	*b = temp;
}
/*
 * zcache_mag_depot_swap_for_alloc
 *
 * Swaps a full magazine into the current position
 *
 * Parameters:
 * depot    pointer to the depot
 * cache    pointer to the current per-cpu cache
 *
 * Precondition: Check that the depot list has full elements
 */
static void
zcache_mag_depot_swap_for_alloc(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache)
{
	/* Loads a full magazine from which we can allocate */
	assert(depot->zcc_depot_index > 0);
	depot->zcc_depot_index--;
#if ZALLOC_DETAILED_STATS
#endif /* ZALLOC_DETAILED_STATS */
	zcache_swap_magazines(&cache->current, &depot->zcc_depot_list[depot->zcc_depot_index]);
}
/*
 * zcache_mag_depot_swap_for_free
 *
 * Swaps an empty magazine into the current position
 *
 * Parameters:
 * depot    pointer to the depot
 * cache    pointer to the current per-cpu cache
 *
 * Precondition: Check that the depot list has empty elements
 */
static void
zcache_mag_depot_swap_for_free(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache)
{
	/* Loads an empty magazine into which we can free */
	assert(depot->zcc_depot_index < depot_element_count);
	zcache_swap_magazines(&cache->current, &depot->zcc_depot_list[depot->zcc_depot_index]);
#if ZALLOC_DETAILED_STATS
#endif /* ZALLOC_DETAILED_STATS */
	depot->zcc_depot_index++;
}
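/*
 * Depot bookkeeping sketch (illustrative): zcc_depot_index points one past
 * the last full magazine, so slots [0, zcc_depot_index) hold full magazines
 * and slots [zcc_depot_index, depot_element_count) hold empty ones.
 * zcache_mag_depot_swap_for_alloc() decrements the index and takes a full
 * magazine; zcache_mag_depot_swap_for_free() deposits a full magazine in
 * exchange for an empty one and then increments the index.
 */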
/*
 * zcache_canary_add
 *
 * Adds a canary to an element by putting zcache_canary at the first
 * and last location of the element
 *
 * Parameters:
 * zone    zone for the element
 * addr    element address to add canary to
 */
static void
zcache_canary_add(zone_t zone, vm_offset_t element)
{
#if ZALLOC_ENABLE_POISONING
	vm_offset_t *primary = (vm_offset_t *)element;
	vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary +
	    zone_elem_size(zone) - sizeof(vm_offset_t));
	*primary = *backup = (zcache_canary ^ (uintptr_t)element);
#else
#pragma unused(zone, element)
#endif /* ZALLOC_ENABLE_POISONING */
}
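/*
 * Worked example of the canary scheme (illustrative values): with
 * zcache_canary == 0xAAAA and an element at address 0x1000, both the first
 * and the last pointer-sized words of the element hold
 * 0xAAAA ^ 0x1000 = 0xBAAA while the element sits in the cache. XORing with
 * the element address means a canary copied verbatim from one element to
 * another will not validate there; validation simply XORs the stored word
 * with the element address again and compares against zcache_canary.
 */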
#if ZALLOC_ENABLE_POISONING
__abortlike static void
zcache_validation_panic(zone_t zone, vm_offset_t *primary, vm_offset_t *backup,
    vm_offset_t permutation)
{
	vm_offset_t primary_value = 0;
	vm_offset_t backup_value = 0;

	if (permutation == zcache_canary) {
		primary_value = *primary ^ (vm_offset_t)primary;
		backup_value = *backup ^ (vm_offset_t)primary;
		permutation = permutation ^ (vm_offset_t)primary;
	} else {
		primary_value = *primary;
		backup_value = *backup;
	}
	if (primary_value != permutation) {
		panic("Zone cache element was used after free! Element %p was corrupted at "
		    "beginning; Expected 0x%lx but found 0x%lx; canary 0x%lx; zone %p (%s%s)",
		    primary, (uintptr_t) permutation, (uintptr_t) *primary, zcache_canary, zone,
		    zone_heap_name(zone), zone->z_name);
	} else {
		panic("Zone cache element was used after free! Element %p was corrupted at end; "
		    "Expected 0x%lx but found 0x%lx; canary 0x%lx; zone %p (%s%s)",
		    primary, (uintptr_t) permutation, (uintptr_t) *backup, zcache_canary, zone,
		    zone_heap_name(zone), zone->z_name);
	}
}
/*
 * zcache_validate_and_clear_canary
 *
 * Validates an element of the zone cache to make sure it still contains the zone
 * caching canary and clears it.
 *
 * Parameters:
 * zone     zone for the element
 * primary  addr of canary placed in front
 * backup   addr of canary placed at the back
 */
static void
zcache_validate_and_clear_canary(zone_t zone, vm_offset_t *primary, vm_offset_t *backup)
{
	vm_offset_t primary_value = (*primary ^ (uintptr_t)primary);
	vm_offset_t backup_value = (*backup ^ (uintptr_t)primary);

	if (primary_value == zcache_canary && backup_value == zcache_canary) {
		*primary = *backup = ZONE_POISON;
	} else {
		zcache_validation_panic(zone, primary, backup, zcache_canary);
	}
}
/*
 * zcache_validate_element
 *
 * Validates the first and last pointer size of the element to ensure
 * that they haven't been altered. This function is used when an
 * element moves from cache to zone, therefore only validating the
 * first and last pointer size (location of future freelist pointers).
 *
 * Parameters:
 * zone     zone for the element
 * element  addr of element to validate
 * poison   has the element been poisoned
 */
static void
zcache_validate_element(zone_t zone, vm_offset_t *element, bool poison)
{
	vm_offset_t *primary = (vm_offset_t *)element;
	vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary +
	    zone_elem_size(zone) - sizeof(vm_offset_t));

	if (zone->zfree_clear_mem) {
		if (*primary == 0 && *backup == 0) {
			return;
		} else {
			zcache_validation_panic(zone, primary, backup, 0);
		}
	}

	if (__probable(!poison)) {
		zcache_validate_and_clear_canary(zone, primary, backup);
	} else {
		if (*primary == ZONE_POISON && *backup == ZONE_POISON) {
			return;
		} else {
			zcache_validation_panic(zone, primary, backup, ZONE_POISON);
		}
	}
}

#endif /* ZALLOC_ENABLE_POISONING */
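/*
 * Lifecycle summary (illustrative): an element entering the cache either
 * keeps its poison pattern (ZALLOC_ELEMENT_NEEDS_VALIDATION set) or gets the
 * XOR canary written by zcache_canary_add(). On the way back to a caller,
 * zcache_alloc_from_cpu_cache() validates and clears the canary; on the way
 * back to the zone, zcache_mag_drain_locked() calls zcache_validate_element(),
 * which also accepts fully zeroed elements for zones with zfree_clear_mem set.
 */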