2 * Copyright (c) 2017 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <kern/assert.h>
30 #include <kern/cpu_data.h>
31 #include <mach/mach_host.h>
32 #include <vm/vm_kern.h>
35 #if defined(__i386__) || defined(__x86_64__)
39 #if defined (__arm__) || defined (__arm64__)
40 #include <arm/cpu_data_internal.h>
43 #define DEFAULT_MAGAZINE_SIZE 8 /* Default number of elements for all magazines allocated from the magazine_zone */
44 #define DEFAULT_DEPOT_SIZE 8 /* Default number of elements for the array zcc_depot_list */
45 #define ZCC_MAX_CPU_CACHE_LINE_SIZE 64 /* We should use a platform specific macro for this in the future, right now this is the max cache line size for all platforms*/
/* File-scope state for the zone caching layer */
lck_grp_t zcache_locks_grp;                /* lock group for depot_lock */
zone_t magazine_zone;                      /* zone to allocate zcc_magazine structs from */
uint16_t magazine_element_count = 0;       /* Size of array in magazine determined by boot-arg or default */
uint16_t depot_element_count = 0;          /* Size of depot lists determined by boot-arg or default */
bool zone_cache_ready = FALSE;             /* Flag to check if zone caching has been set up by zcache_bootstrap */
uintptr_t zcache_canary = 0;               /* Canary used for the caching layer to prevent UaF attacks */
/*
 * The zcc_magazine is used as a stack to store cached zone elements. These
 * sets of elements can be moved around to perform bulk operations.
 */
struct zcc_magazine {
	uint32_t zcc_magazine_index;            /* Used as a stack pointer to access elements in the array */
	uint32_t zcc_magazine_capacity;         /* Number of pointers able to be stored in the zcc_elements array */
	void *zcc_elements[0];                  /* Array of pointers to objects; trailing variable-length storage */
};
/* Each CPU will use one of these to store its elements */
struct zcc_per_cpu_cache {
	struct zcc_magazine *current;   /* Magazine from which we will always try to allocate from and free to first */
	struct zcc_magazine *previous;  /* Dedicated magazine for a quick reload and to prevent thrashing when we swap with the depot */
} __attribute__((aligned(ZCC_MAX_CPU_CACHE_LINE_SIZE))); /* aligned to a cache line size so it does not thrash when multiple cpus want to access their caches in parallel */
/*
 * The depot layer can be invalid while zone_gc() is draining it out.
 * During that time, the CPU caches are active. For CPU magazine allocs and
 * frees, the caching layer reaches directly into the zone allocator.
 */
/* Parenthesized: a bare -1 or unparenthesized macro argument can bind
 * unexpectedly when the macro expands inside a larger expression. */
#define ZCACHE_DEPOT_INVALID            (-1)
#define zcache_depot_available(zcache)  ((zcache)->zcc_depot_index != ZCACHE_DEPOT_INVALID)
80 /* This is the basic struct to take care of cahing and is included within
84 lck_mtx_t zcc_depot_lock
; /* Lock for the depot layer of caching */
85 struct zcc_per_cpu_cache zcc_per_cpu_caches
[MAX_CPUS
]; /* An array of caches, one for each CPU */
86 int zcc_depot_index
; /* marks the point in the array where empty magazines begin */
87 struct zcc_magazine
*zcc_depot_list
[0]; /* Stores full and empty magazines in the depot layer */
/* Forward declarations of the internal zone-cache routines defined below */
void zcache_init_marked_zones(void);
bool zcache_mag_fill(zone_t zone, struct zcc_magazine *mag);
void zcache_mag_drain(zone_t zone, struct zcc_magazine *mag);
void zcache_mag_init(struct zcc_magazine *mag, int count);
void *zcache_mag_pop(struct zcc_magazine *mag);
void zcache_mag_push(struct zcc_magazine *mag, void *elem);
bool zcache_mag_has_space(struct zcc_magazine *mag);
bool zcache_mag_has_elements(struct zcc_magazine *mag);
void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b);
void zcache_mag_depot_swap_for_alloc(struct zone_cache *depot, struct zcc_per_cpu_cache *cache);
void zcache_mag_depot_swap_for_free(struct zone_cache *depot, struct zcc_per_cpu_cache *cache);
void zcache_mag_depot_swap(struct zone_cache *depot, struct zcc_per_cpu_cache *cache, boolean_t load_full);
void zcache_canary_add(zone_t zone, void *addr);
void zcache_canary_validate(zone_t zone, void *addr);
109 * Description: returns whether or not the zone caches are ready to use
115 return zone_cache_ready
;
119 * zcache_init_marked_zones
121 * Description: Initializes all parts of the per-cpu caches for the list of
122 * marked zones once we are able to initalize caches. This should
123 * only be called once, and will be called during the time that the
124 * system is single threaded so we don't have to take the lock.
128 zcache_init_marked_zones(void)
131 for (i
= 0; i
< num_zones
; i
++) {
132 if (zone_array
[i
].cpu_cache_enable_when_ready
) {
133 zcache_init(&zone_array
[i
]);
134 zone_array
[i
].cpu_cache_enable_when_ready
= FALSE
;
142 * Description: initializes zone to allocate magazines from and sets
143 * magazine_element_count and depot_element_count from
144 * boot-args or default values
148 zcache_bootstrap(void)
150 /* use boot-arg for custom magazine size*/
151 if (!PE_parse_boot_argn("zcc_magazine_element_count", &magazine_element_count
, sizeof(uint16_t))) {
152 magazine_element_count
= DEFAULT_MAGAZINE_SIZE
;
155 int magazine_size
= sizeof(struct zcc_magazine
) + magazine_element_count
* sizeof(void *);
157 magazine_zone
= zinit(magazine_size
, 100000 * magazine_size
, magazine_size
, "zcc_magazine_zone");
159 assert(magazine_zone
!= NULL
);
161 /* use boot-arg for custom depot size*/
162 if (!PE_parse_boot_argn("zcc_depot_element_count", &depot_element_count
, sizeof(uint16_t))) {
163 depot_element_count
= DEFAULT_DEPOT_SIZE
;
166 lck_grp_init(&zcache_locks_grp
, "zcc_depot_lock", LCK_GRP_ATTR_NULL
);
168 /* Generate the canary value for zone caches */
169 zcache_canary
= (uintptr_t) early_random();
171 zone_cache_ready
= TRUE
;
173 zcache_init_marked_zones();
180 * Description: Initializes all parts of the per-cpu caches for a given zone
182 * Parameters: zone pointer to zone on which to iniitalize caching
186 zcache_init(zone_t zone
)
188 int i
; /* used as index in for loops */
189 vm_size_t total_size
; /* Used for allocating the zone_cache struct with the proper size of depot list */
190 struct zone_cache
*temp_cache
; /* Temporary variable to initialize a zone_cache before assigning to the specified zone */
192 /* Allocate chunk of memory for all structs */
193 total_size
= sizeof(struct zone_cache
) + (depot_element_count
* sizeof(void *));
195 temp_cache
= (struct zone_cache
*) kalloc(total_size
);
198 /* Initialize a cache for every CPU */
199 for (i
= 0; i
< MAX_CPUS
; i
++) {
200 temp_cache
->zcc_per_cpu_caches
[i
].current
= (struct zcc_magazine
*)zalloc(magazine_zone
);
201 temp_cache
->zcc_per_cpu_caches
[i
].previous
= (struct zcc_magazine
*)zalloc(magazine_zone
);
203 assert(temp_cache
->zcc_per_cpu_caches
[i
].current
!= NULL
&& temp_cache
->zcc_per_cpu_caches
[i
].previous
!= NULL
);
205 zcache_mag_init(temp_cache
->zcc_per_cpu_caches
[i
].current
, magazine_element_count
);
206 zcache_mag_init(temp_cache
->zcc_per_cpu_caches
[i
].previous
, magazine_element_count
);
209 /* Initialize the lock on the depot layer */
210 lck_mtx_init(&(temp_cache
->zcc_depot_lock
), &zcache_locks_grp
, LCK_ATTR_NULL
);
212 /* Initialize empty magazines in the depot list */
213 for (i
= 0; i
< depot_element_count
; i
++) {
214 temp_cache
->zcc_depot_list
[i
] = (struct zcc_magazine
*)zalloc(magazine_zone
);
216 assert(temp_cache
->zcc_depot_list
[i
] != NULL
);
218 zcache_mag_init(temp_cache
->zcc_depot_list
[i
], magazine_element_count
);
221 temp_cache
->zcc_depot_index
= 0;
224 zone
->zcache
= temp_cache
;
225 /* Set flag to know caching is enabled */
226 zone
->cpu_cache_enabled
= TRUE
;
234 * Description: Frees all the full magazines from the depot layer to the zone allocator as part
235 * of zone_gc(). The routine assumes that only one zone_gc() is in progress (zone_gc_lock
238 * Parameters: zone pointer to zone for which the depot layer needs to be drained
244 zcache_drain_depot(zone_t zone
)
246 struct zone_cache
*zcache
= zone
->zcache
;
247 int drain_depot_index
= 0;
250 * Grab the current depot list from the zone cache. If it has full magazines,
251 * mark the depot as invalid and drain it.
253 lck_mtx_lock_spin_always(&(zcache
->zcc_depot_lock
));
254 if (!zcache_depot_available(zcache
) || (zcache
->zcc_depot_index
== 0)) {
255 /* no full magazines in the depot or depot unavailable; nothing to drain here */
256 lck_mtx_unlock(&(zcache
->zcc_depot_lock
));
259 drain_depot_index
= zcache
->zcc_depot_index
;
260 /* Mark the depot as unavailable */
261 zcache
->zcc_depot_index
= ZCACHE_DEPOT_INVALID
;
262 lck_mtx_unlock(&(zcache
->zcc_depot_lock
));
264 /* Now drain the full magazines in the depot */
265 for (int i
= 0; i
< drain_depot_index
; i
++) {
266 zcache_mag_drain(zone
, zcache
->zcc_depot_list
[i
]);
269 lck_mtx_lock_spin_always(&(zcache
->zcc_depot_lock
));
270 /* Mark the depot as available again */
271 zcache
->zcc_depot_index
= 0;
272 lck_mtx_unlock(&(zcache
->zcc_depot_lock
));
277 * zcache_free_to_cpu_cache
279 * Description: Checks per-cpu caches to free element there if possible
281 * Parameters: zone pointer to zone for which element comes from
282 * addr pointer to element to free
284 * Returns: TRUE if successfull, FALSE otherwise
286 * Precondition: check that caching is enabled for zone
289 zcache_free_to_cpu_cache(zone_t zone
, void *addr
)
291 int curcpu
; /* Current cpu is used to index into array of zcc_per_cpu_cache structs */
292 struct zone_cache
*zcache
; /* local storage of the zone's cache */
293 struct zcc_per_cpu_cache
*per_cpu_cache
; /* locally store the current per_cpu_cache */
295 disable_preemption();
296 curcpu
= current_processor()->cpu_id
;
297 zcache
= zone
->zcache
;
298 per_cpu_cache
= &zcache
->zcc_per_cpu_caches
[curcpu
];
300 if (zcache_mag_has_space(per_cpu_cache
->current
)) {
301 /* If able, free into current magazine */
302 goto free_to_current
;
303 } else if (zcache_mag_has_space(per_cpu_cache
->previous
)) {
304 /* If able, swap current and previous magazine and retry */
305 zcache_swap_magazines(&per_cpu_cache
->previous
, &per_cpu_cache
->current
);
306 goto free_to_current
;
308 lck_mtx_lock_spin_always(&(zcache
->zcc_depot_lock
));
309 if (zcache_depot_available(zcache
) && (zcache
->zcc_depot_index
< depot_element_count
)) {
310 /* If able, rotate in a new empty magazine from the depot and retry */
311 zcache_mag_depot_swap_for_free(zcache
, per_cpu_cache
);
312 lck_mtx_unlock(&(zcache
->zcc_depot_lock
));
313 goto free_to_current
;
315 lck_mtx_unlock(&(zcache
->zcc_depot_lock
));
316 /* Attempt to free an entire magazine of elements */
317 zcache_mag_drain(zone
, per_cpu_cache
->current
);
318 if (zcache_mag_has_space(per_cpu_cache
->current
)) {
319 goto free_to_current
;
323 /* If not able to use cache return FALSE and fall through to zfree */
328 assert(zcache_mag_has_space(per_cpu_cache
->current
));
329 zcache_canary_add(zone
, addr
);
330 zcache_mag_push(per_cpu_cache
->current
, addr
);
333 kasan_poison_range((vm_offset_t
)addr
, zone
->elem_size
, ASAN_HEAP_FREED
);
342 * zcache_alloc_from_cpu_cache
344 * Description: Checks per-cpu caches to allocate element from there if possible
346 * Parameters: zone pointer to zone for which element will come from
348 * Returns: pointer to usable element
350 * Precondition: check that caching is enabled for zone
353 zcache_alloc_from_cpu_cache(zone_t zone
)
355 int curcpu
; /* Current cpu is used to index into array of zcc_per_cpu_cache structs */
356 void *ret
= NULL
; /* Points to the element which will be returned */
357 struct zone_cache
*zcache
; /* local storage of the zone's cache */
358 struct zcc_per_cpu_cache
*per_cpu_cache
; /* locally store the current per_cpu_cache */
360 disable_preemption();
361 curcpu
= current_processor()->cpu_id
;
362 zcache
= zone
->zcache
;
363 per_cpu_cache
= &zcache
->zcc_per_cpu_caches
[curcpu
];
365 if (zcache_mag_has_elements(per_cpu_cache
->current
)) {
366 /* If able, allocate from current magazine */
367 goto allocate_from_current
;
368 } else if (zcache_mag_has_elements(per_cpu_cache
->previous
)) {
369 /* If able, swap current and previous magazine and retry */
370 zcache_swap_magazines(&per_cpu_cache
->previous
, &per_cpu_cache
->current
);
371 goto allocate_from_current
;
373 lck_mtx_lock_spin_always(&(zcache
->zcc_depot_lock
));
374 if (zcache_depot_available(zcache
) && (zcache
->zcc_depot_index
> 0)) {
375 /* If able, rotate in a full magazine from the depot */
376 zcache_mag_depot_swap_for_alloc(zcache
, per_cpu_cache
);
377 lck_mtx_unlock(&(zcache
->zcc_depot_lock
));
378 goto allocate_from_current
;
380 lck_mtx_unlock(&(zcache
->zcc_depot_lock
));
381 /* Attempt to allocate an entire magazine of elements */
382 if (zcache_mag_fill(zone
, per_cpu_cache
->current
)) {
383 goto allocate_from_current
;
387 /* If unable to allocate from cache return NULL and fall through to zalloc */
389 return (vm_offset_t
) NULL
;
391 allocate_from_current
:
392 ret
= zcache_mag_pop(per_cpu_cache
->current
);
394 zcache_canary_validate(zone
, ret
);
397 kasan_poison_range((vm_offset_t
)ret
, zone
->elem_size
, ASAN_VALID
);
401 return (vm_offset_t
) ret
;
408 * Description: initializes fields in a zcc_magazine struct
410 * Parameters: mag pointer to magazine to initialize
414 zcache_mag_init(struct zcc_magazine
*mag
, int count
)
416 mag
->zcc_magazine_index
= 0;
417 mag
->zcc_magazine_capacity
= count
;
424 * Description: fills a magazine with as many elements as the zone can give
425 * without blocking to carve out more memory
427 * Parameters: zone zone from which to allocate
428 * mag pointer to magazine to fill
430 * Return: True if able to allocate elements, false is mag is still empty
433 zcache_mag_fill(zone_t zone
, struct zcc_magazine
*mag
)
435 assert(mag
->zcc_magazine_index
== 0);
439 for (i
= mag
->zcc_magazine_index
; i
< mag
->zcc_magazine_capacity
; i
++) {
440 elem
= zalloc_attempt(zone
);
442 zcache_canary_add(zone
, elem
);
443 zcache_mag_push(mag
, elem
);
445 kasan_poison_range((vm_offset_t
)elem
, zone
->elem_size
, ASAN_HEAP_FREED
);
461 * Description: frees all elements in a magazine
463 * Parameters: zone zone to which elements will be freed
464 * mag pointer to magazine to empty
468 zcache_mag_drain(zone_t zone
, struct zcc_magazine
*mag
)
470 assert(mag
->zcc_magazine_index
== mag
->zcc_magazine_capacity
);
472 while (mag
->zcc_magazine_index
> 0) {
473 uint32_t index
= --mag
->zcc_magazine_index
;
474 zcache_canary_validate(zone
, mag
->zcc_elements
[index
]);
475 zfree_direct(zone
, (vm_offset_t
)mag
->zcc_elements
[index
]);
476 mag
->zcc_elements
[mag
->zcc_magazine_index
] = 0;
484 * Description: removes last element from magazine in a stack pop fashion
485 * zcc_magazine_index represents the number of elements on the
486 * stack, so it the index of where to save the next element, when
487 * full, it will be 1 past the last index of the array
489 * Parameters: mag pointer to magazine from which to remove element
491 * Returns: pointer to element removed from magazine
493 * Precondition: must check that magazine is not empty before calling
496 zcache_mag_pop(struct zcc_magazine
*mag
)
499 assert(zcache_mag_has_elements(mag
));
500 elem
= mag
->zcc_elements
[--mag
->zcc_magazine_index
];
501 /* Ensure pointer to element cannot be accessed after we pop it */
502 mag
->zcc_elements
[mag
->zcc_magazine_index
] = NULL
;
503 assert(elem
!= NULL
);
511 * Description: adds element to magazine and increments zcc_magazine_index
512 * zcc_magazine_index represents the number of elements on the
513 * stack, so it the index of where to save the next element, when
514 * full, it will be 1 past the last index of the array
516 * Parameters: mag pointer to magazine from which to remove element
517 * elem pointer to element to add
519 * Precondition: must check that magazine is not full before calling
522 zcache_mag_push(struct zcc_magazine
*mag
, void *elem
)
524 assert(zcache_mag_has_space(mag
));
525 mag
->zcc_elements
[mag
->zcc_magazine_index
++] = elem
;
530 * zcache_mag_has_space
532 * Description: checks if magazine still has capacity
534 * Parameters: mag pointer to magazine to check
536 * Returns: true if magazine is full
540 zcache_mag_has_space(struct zcc_magazine
*mag
)
542 return mag
->zcc_magazine_index
< mag
->zcc_magazine_capacity
;
547 * zcache_mag_has_elements
549 * Description: checks if magazine is empty
551 * Parameters: mag pointer to magazine to check
553 * Returns: true if magazine has no elements
557 zcache_mag_has_elements(struct zcc_magazine
*mag
)
559 return mag
->zcc_magazine_index
> 0;
/*
 * zcache_swap_magazines
 *
 * Description: swaps the two magazine pointers that a and b point at
 *
 * Parameters:	a	pointer to first magazine pointer
 *		b	pointer to second magazine pointer
 */
void
zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b)
{
	struct zcc_magazine *swap = *a;
	*a = *b;
	*b = swap;
}
581 * zcache_mag_depot_swap_for_alloc
583 * Description: Swaps a full magazine into the current position
585 * Parameters: zcache pointer to the zone_cache to access the depot
586 * cache pointer to the current per-cpu cache
588 * Precondition: Check that the depot list has full elements
591 zcache_mag_depot_swap_for_alloc(struct zone_cache
*zcache
, struct zcc_per_cpu_cache
*cache
)
593 /* Loads a full magazine from which we can allocate */
594 assert(zcache_depot_available(zcache
));
595 assert(zcache
->zcc_depot_index
> 0);
596 zcache
->zcc_depot_index
--;
597 zcache_swap_magazines(&cache
->current
, &zcache
->zcc_depot_list
[zcache
->zcc_depot_index
]);
602 * zcache_mag_depot_swap_for_free
604 * Description: Swaps an empty magazine into the current position
606 * Parameters: zcache pointer to the zone_cache to access the depot
607 * cache pointer to the current per-cpu cache
609 * Precondition: Check that the depot list has empty elements
612 zcache_mag_depot_swap_for_free(struct zone_cache
*zcache
, struct zcc_per_cpu_cache
*cache
)
614 /* Loads an empty magazine into which we can free */
615 assert(zcache_depot_available(zcache
));
616 assert(zcache
->zcc_depot_index
< depot_element_count
);
617 zcache_swap_magazines(&cache
->current
, &zcache
->zcc_depot_list
[zcache
->zcc_depot_index
]);
618 zcache
->zcc_depot_index
++;
624 * Description: Adds a canary to an element by putting zcache_canary at the first
625 * and last location of the element
627 * Parameters: zone zone for the element
628 * addr element address to add canary to
632 zcache_canary_add(zone_t zone
, void *element
)
634 vm_offset_t
*primary
= (vm_offset_t
*)element
;
635 vm_offset_t
*backup
= (vm_offset_t
*)((vm_offset_t
)primary
+ zone
->elem_size
- sizeof(vm_offset_t
));
636 *primary
= *backup
= (zcache_canary
^ (uintptr_t)element
);
640 * zcache_canary_validate
642 * Description: Validates an element of the zone cache to make sure it still contains the zone
645 * Parameters: zone zone for the element
646 * addr element address to validate
650 zcache_canary_validate(zone_t zone
, void *element
)
652 vm_offset_t
*primary
= (vm_offset_t
*)element
;
653 vm_offset_t
*backup
= (vm_offset_t
*)((vm_offset_t
)primary
+ zone
->elem_size
- sizeof(vm_offset_t
));
655 vm_offset_t primary_value
= (*primary
^ (uintptr_t)element
);
656 if (primary_value
!= zcache_canary
) {
657 panic("Zone cache element was used after free! Element %p was corrupted at beginning; Expected %p but found %p; canary %p",
658 element
, (void *)(zcache_canary
^ (uintptr_t)element
), (void *)(*primary
), (void *)zcache_canary
);
661 vm_offset_t backup_value
= (*backup
^ (uintptr_t)element
);
662 if (backup_value
!= zcache_canary
) {
663 panic("Zone cache element was used after free! Element %p was corrupted at end; Expected %p but found %p; canary %p",
664 element
, (void *)(zcache_canary
^ (uintptr_t)element
), (void *)(*backup
), (void *)zcache_canary
);