/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdbool.h>
#include <stdint.h>
#include <kern/assert.h>
#include <kern/locks.h>
#include <os/atomic_private.h>

#include "log_mem.h"

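/*
 * The routines below manage a preallocated buffer, lm_mem, as a binary
 * buddy allocator. As used here, a logmem_t covers 2^lm_cap_order bytes
 * and hands out power-of-two blocks between 2^lm_min_order and
 * 2^lm_max_order bytes. Block state lives in the lm_mem_map bitmap,
 * interpreted as a complete binary tree stored heap-style: node 0 is the
 * whole region, and the children of node b (2b + 1 and 2b + 2) are its
 * two halves. A set bit means the block is allocated or overlaps an
 * allocation.
 */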
#define BLOCK_INVALID ((size_t)-1)
#define BLOCK_LEVEL_BASE(level) ((1 << (level)) - 1)
#define BLOCK_SIZE(level) (1 << (level))
#define BLOCK_PARENT(b) (((b) % 2 == 0) ? ((b) >> 1) - 1 : ((b) >> 1))
#define BLOCK_LCHILD(b) (((b) << 1) + 1)
#define BLOCK_BUDDY(b) (((b) & 0x1) ? (b) + 1 : (b) - 1)
#define BLOCK_INDEX(lm, l, a, s) \
	(BLOCK_LEVEL_BASE(l) + ((uintptr_t)(a) - (uintptr_t)(lm)->lm_mem) / (s))

#define BITMAP_BUCKET_SIZE (8 * sizeof(((logmem_t *)0)->lm_mem_map[0]))
#define BITMAP_BUCKET(i) ((i) / BITMAP_BUCKET_SIZE)
#define BITMAP_BIT(i) (1 << (BITMAP_BUCKET_SIZE - ((i) % BITMAP_BUCKET_SIZE) - 1))

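/*
 * The bitmap accessors below address lm_mem_map by tree-node index:
 * BITMAP_BUCKET selects the map word holding a node's bit, and BITMAP_BIT
 * selects the bit within that word, most-significant bit first.
 */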
static bool
bitmap_get(logmem_t *lm, size_t block)
{
	return lm->lm_mem_map[BITMAP_BUCKET(block)] & BITMAP_BIT(block);
}

static void
bitmap_set(logmem_t *lm, size_t block)
{
	lm->lm_mem_map[BITMAP_BUCKET(block)] |= BITMAP_BIT(block);
}

static void
bitmap_clear(logmem_t *lm, size_t block)
{
	lm->lm_mem_map[BITMAP_BUCKET(block)] &= ~BITMAP_BIT(block);
}

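/*
 * bitmap_reserve_root() and bitmap_release_root() maintain the path from a
 * block up to the topmost allocatable level (blocks of 2^lm_max_order
 * bytes). Reserving marks every ancestor so no enclosing block can be
 * handed out while this one is live; releasing walks back up and clears
 * ancestors until it finds a buddy that is still allocated.
 */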
static void
bitmap_reserve_root(logmem_t *lm, size_t block)
{
	const size_t top_block = BLOCK_LEVEL_BASE(lm->lm_cap_order - lm->lm_max_order);

	for (ssize_t next = BLOCK_PARENT(block); next >= top_block; next = BLOCK_PARENT(next)) {
		/*
		 * If the rest of the root path is already marked as
		 * allocated we are done.
		 */
		if (bitmap_get(lm, next)) {
			break;
		}
		bitmap_set(lm, next);
	}
}

static void
bitmap_release_root(logmem_t *lm, size_t block)
{
	const size_t top_block = BLOCK_LEVEL_BASE(lm->lm_cap_order - lm->lm_max_order);
	int buddy_allocated = 0;

	while (block > top_block) {
		buddy_allocated = bitmap_get(lm, BLOCK_BUDDY(block));
		block = BLOCK_PARENT(block);
		/*
		 * If there is another allocation within the parent subtree
		 * in place we cannot mark the rest of the root path as free.
		 */
		if (buddy_allocated) {
			break;
		}
		bitmap_clear(lm, block);
	}
}

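/*
 * Apply fun (bitmap_set or bitmap_clear) to a block and its entire
 * subtree, down to the deepest level (blocks of 2^lm_min_order bytes).
 * Marking the whole subtree keeps every smaller block that overlaps an
 * allocation from being handed out by block_reserve().
 */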
static void
bitmap_update_subtree(logmem_t *lm, size_t level, size_t block, void (*fun)(logmem_t *, size_t))
{
	const size_t lcount = lm->lm_cap_order - lm->lm_min_order - level + 1;

	for (size_t l = 0, n = 1; l < lcount; l++, n <<= 1) {
		for (int i = 0; i < n; i++) {
			fun(lm, block + i);
		}
		block = BLOCK_LCHILD(block);
	}
}

static void
bitmap_release_subtree(logmem_t *lm, size_t level, size_t block)
{
	bitmap_update_subtree(lm, level, block, bitmap_clear);
}

static void
bitmap_reserve_subtree(logmem_t *lm, size_t level, size_t block)
{
	bitmap_update_subtree(lm, level, block, bitmap_set);
}

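/*
 * Map a requested size to a tree level. Orders grow with block size while
 * levels grow with tree depth, so a block of order l lives at level
 * lm_cap_order - l. For example (illustrative values only), with
 * lm_cap_order = 16, lm_min_order = 5 and lm_max_order = 10, a request for
 * 100 bytes rounds up to order 7 (128 bytes), i.e. level 9 of the tree.
 */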
static size_t
block_size_level(logmem_t *lm, size_t amount)
{
	for (size_t l = lm->lm_min_order; l <= lm->lm_max_order; l++) {
		if (amount <= BLOCK_SIZE(l)) {
			return lm->lm_cap_order - l;
		}
	}
	return BLOCK_INVALID;
}

static size_t
block_locate(logmem_t *lm, void *addr, size_t amount, size_t *block)
{
	size_t level = block_size_level(lm, amount);
	if (level != BLOCK_INVALID) {
		*block = BLOCK_INDEX(lm, level, addr, amount);
	}
	return level;
}

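/*
 * First-fit scan of the requested level: under the lock, find the first
 * block whose bit is clear, then mark both its root path and its subtree
 * as allocated. Returns the block's index relative to the start of its
 * level, or BLOCK_INVALID if the level is fully occupied.
 */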
static size_t
block_reserve(logmem_t *lm, size_t level)
{
	assert(level != BLOCK_INVALID);

	const size_t base = BLOCK_LEVEL_BASE(level);
	const size_t end = base + BLOCK_SIZE(level);

	lck_spin_lock(lm->lm_lock);
	for (size_t block = base; block < end; block++) {
		if (!bitmap_get(lm, block)) {
			bitmap_reserve_root(lm, block);
			bitmap_reserve_subtree(lm, level, block);
			lck_spin_unlock(lm->lm_lock);
			return block - base;
		}
	}
	lck_spin_unlock(lm->lm_lock);

	return BLOCK_INVALID;
}

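/*
 * Allocate at least *amount bytes from lm. On success *amount is updated
 * to the actual (power-of-two) block size and a pointer into lm_mem is
 * returned; the caller is expected to pass that same size back to
 * logmem_free(). Failure counters distinguish oversized requests
 * (lm_cnt_failed_size) from a full allocator (lm_cnt_failed_full).
 */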
void *
logmem_alloc(logmem_t *lm, size_t *amount)
{
	assert(amount);

	os_atomic_inc(&lm->lm_cnt_allocations, relaxed);

	if (*amount == 0 || *amount > BLOCK_SIZE(lm->lm_max_order)) {
		os_atomic_inc(&lm->lm_cnt_failed_size, relaxed);
		return NULL;
	}

	size_t level = block_size_level(lm, *amount);
	size_t block = block_reserve(lm, level);

	if (block == BLOCK_INVALID) {
		os_atomic_inc(&lm->lm_cnt_failed_full, relaxed);
		return NULL;
	}

	*amount = BLOCK_SIZE(lm->lm_cap_order - level);
	os_atomic_sub(&lm->lm_cnt_free, (uint32_t)*amount, relaxed);

	return &lm->lm_mem[block * *amount];
}

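/*
 * Free a block previously returned by logmem_alloc(). amount must be the
 * rounded-up size reported through *amount at allocation time; it is used
 * to recompute the block's tree position from its address.
 */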
void
logmem_free(logmem_t *lm, void *addr, size_t amount)
{
	assert(addr);
	assert(amount > 0 && ((amount & (amount - 1)) == 0));

	size_t block = BLOCK_INVALID;
	size_t level = block_locate(lm, addr, amount, &block);
	assert(level != BLOCK_INVALID);
	assert(block != BLOCK_INVALID);

	lck_spin_lock(lm->lm_lock);
	bitmap_release_root(lm, block);
	bitmap_release_subtree(lm, level, block);
	lck_spin_unlock(lm->lm_lock);

	os_atomic_add(&lm->lm_cnt_free, (uint32_t)amount, relaxed);
}

size_t
logmem_max_size(const logmem_t *lm)
{
	return BLOCK_SIZE(lm->lm_max_order);
}
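
/*
 * Usage sketch (illustrative only; creation and setup of the logmem_t,
 * including lm_mem, lm_mem_map and lm_lock, is handled outside this file):
 *
 *	size_t size = 96;
 *	void *buf = logmem_alloc(lm, &size);
 *	if (buf != NULL) {
 *		// size now holds the rounded-up power-of-two block size
 *		// actually reserved; pass the same value when freeing.
 *		logmem_free(lm, buf, size);
 *	}
 */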