/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/locks.h>
#include <os/atomic_private.h>

#include "log_mem.h"

#define BLOCK_INVALID ((size_t)-1)
#define BLOCK_LEVEL_BASE(level) ((1 << (level)) - 1)
#define BLOCK_SIZE(level) (1 << (level))
#define BLOCK_PARENT(b) (((b) % 2 == 0) ? ((b) >> 1) - 1 : ((b) >> 1))
#define BLOCK_LCHILD(b) (((b) << 1) + 1)
#define BLOCK_BUDDY(b) (((b) & 0x1) ? (b) + 1 : (b) - 1)
#define BLOCK_INDEX(lm, l, a, s) \
	(BLOCK_LEVEL_BASE(l) + ((uintptr_t)(a) - (uintptr_t)(lm)->lm_mem) / (s))

#define BITMAP_BUCKET_SIZE (8 * sizeof(((logmem_t *)0)->lm_mem_map[0]))
#define BITMAP_BUCKET(i) ((i) / BITMAP_BUCKET_SIZE)
#define BITMAP_BIT(i) (1 << (BITMAP_BUCKET_SIZE - ((i) % BITMAP_BUCKET_SIZE) - 1))
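
/*
 * Block bookkeeping: lm_mem_map stores a complete binary tree flattened into
 * a bitmap.  Index 0 corresponds to a block of BLOCK_SIZE(lm_cap_order)
 * bytes, every level below it halves the block size, a block b has its left
 * child at BLOCK_LCHILD(b) and its sibling at BLOCK_BUDDY(b), and
 * BLOCK_LEVEL_BASE(l) is the index of the first block on level l.
 * BLOCK_INDEX() maps the address of a block of size s back to its bitmap
 * index on the corresponding level.
 */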

static bool
bitmap_get(logmem_t *lm, size_t block)
{
	return lm->lm_mem_map[BITMAP_BUCKET(block)] & BITMAP_BIT(block);
}

static void
bitmap_set(logmem_t *lm, size_t block)
{
	lm->lm_mem_map[BITMAP_BUCKET(block)] |= BITMAP_BIT(block);
}

static void
bitmap_clear(logmem_t *lm, size_t block)
{
	lm->lm_mem_map[BITMAP_BUCKET(block)] &= ~BITMAP_BIT(block);
}
static void
bitmap_reserve_root(logmem_t *lm, size_t block)
{
	const size_t top_block = BLOCK_LEVEL_BASE(lm->lm_cap_order - lm->lm_max_order);

	for (ssize_t next = BLOCK_PARENT(block); next >= top_block; next = BLOCK_PARENT(next)) {
		/*
		 * If the rest of the root path is already marked as
		 * allocated we are done.
		 */
		if (bitmap_get(lm, next)) {
			break;
		}
		bitmap_set(lm, next);
	}
}
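
/*
 * Walk from a released block up towards the top of the tree, clearing each
 * ancestor as long as its other child subtree holds no allocation; stop at
 * the first level where the buddy is still in use.
 */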
static void
bitmap_release_root(logmem_t *lm, size_t block)
{
	const size_t top_block = BLOCK_LEVEL_BASE(lm->lm_cap_order - lm->lm_max_order);
	int buddy_allocated = 0;

	while (block > top_block) {
		buddy_allocated = bitmap_get(lm, BLOCK_BUDDY(block));
		block = BLOCK_PARENT(block);
		/*
		 * If there is another allocation within the parent subtree
		 * in place we cannot mark the rest of the root path as free.
		 */
		if (buddy_allocated) {
			break;
		}
		bitmap_clear(lm, block);
	}
}
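
/*
 * Apply fun (bitmap_set or bitmap_clear) to a block and to every block of
 * the subtree below it, level by level, so the entire range covered by the
 * block is marked in one pass.
 */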
static void
bitmap_update_subtree(logmem_t *lm, size_t level, size_t block, void (*fun)(logmem_t *, size_t))
{
	const size_t lcount = lm->lm_cap_order - lm->lm_min_order - level + 1;

	for (size_t l = 0, n = 1; l < lcount; l++, n <<= 1) {
		for (int i = 0; i < n; i++) {
			fun(lm, block + i);
		}
		block = BLOCK_LCHILD(block);
	}
}

static void
bitmap_release_subtree(logmem_t *lm, size_t level, size_t block)
{
	bitmap_update_subtree(lm, level, block, bitmap_clear);
}

static void
bitmap_reserve_subtree(logmem_t *lm, size_t level, size_t block)
{
	bitmap_update_subtree(lm, level, block, bitmap_set);
}
static size_t
block_size_level(logmem_t *lm, size_t amount)
{
	for (size_t l = lm->lm_min_order; l <= lm->lm_max_order; l++) {
		if (amount <= BLOCK_SIZE(l)) {
			return lm->lm_cap_order - l;
		}
	}
	return BLOCK_INVALID;
}

static size_t
block_locate(logmem_t *lm, void *addr, size_t amount, size_t *block)
{
	size_t level = block_size_level(lm, amount);
	if (level != BLOCK_INVALID) {
		*block = BLOCK_INDEX(lm, level, addr, amount);
	}
	return level;
}
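
/*
 * Scan the requested level for a free block while holding the spinlock.
 * On success, mark the block's root path and subtree as allocated and
 * return the block index relative to the level base; otherwise return
 * BLOCK_INVALID.
 */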
static size_t
block_reserve(logmem_t *lm, size_t level)
{
	assert(level != BLOCK_INVALID);

	const size_t base = BLOCK_LEVEL_BASE(level);
	const size_t end = base + BLOCK_SIZE(level);

	lck_spin_lock(lm->lm_lock);
	for (size_t block = base; block < end; block++) {
		if (!bitmap_get(lm, block)) {
			bitmap_reserve_root(lm, block);
			bitmap_reserve_subtree(lm, level, block);
			lck_spin_unlock(lm->lm_lock);
			return block - base;
		}
	}
	lck_spin_unlock(lm->lm_lock);

	return BLOCK_INVALID;
}
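
/*
 * Allocate log memory of at least *amount bytes.  On success, return the
 * block address and update *amount with the actual power-of-two block size
 * handed out; return NULL when the request exceeds the maximum block size
 * or no suitable block is free.
 */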
void *
logmem_alloc(logmem_t *lm, size_t *amount)
{
	os_atomic_inc(&lm->lm_cnt_allocations, relaxed);

	if (*amount == 0 || *amount > BLOCK_SIZE(lm->lm_max_order)) {
		os_atomic_inc(&lm->lm_cnt_failed_size, relaxed);
		return NULL;
	}

	size_t level = block_size_level(lm, *amount);
	size_t block = block_reserve(lm, level);

	if (block == BLOCK_INVALID) {
		os_atomic_inc(&lm->lm_cnt_failed_full, relaxed);
		return NULL;
	}

	*amount = BLOCK_SIZE(lm->lm_cap_order - level);
	os_atomic_sub(&lm->lm_cnt_free, (uint32_t)*amount, relaxed);

	return &lm->lm_mem[block * *amount];
}
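
/*
 * Return a block previously obtained from logmem_alloc().  The amount must
 * be the power-of-two size reported back through *amount at allocation
 * time, since it is used to locate the block within the tree.
 */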
void
logmem_free(logmem_t *lm, void *addr, size_t amount)
{
	assert(amount > 0 && ((amount & (amount - 1)) == 0));

	size_t block = BLOCK_INVALID;
	size_t level = block_locate(lm, addr, amount, &block);
	assert(level != BLOCK_INVALID);
	assert(block != BLOCK_INVALID);

	lck_spin_lock(lm->lm_lock);
	bitmap_release_root(lm, block);
	bitmap_release_subtree(lm, level, block);
	lck_spin_unlock(lm->lm_lock);

	os_atomic_add(&lm->lm_cnt_free, (uint32_t)amount, relaxed);
}

size_t
logmem_max_size(const logmem_t *lm)
{
	return BLOCK_SIZE(lm->lm_max_order);
}
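
/*
 * Illustrative usage sketch (assumes an already initialized logmem_t *lm):
 * the caller asks for at least as many bytes as it needs, writes no more
 * than the rounded-up size returned through the pointer, and frees with
 * that same size.
 *
 *	size_t amount = 96;
 *	void *buf = logmem_alloc(lm, &amount);
 *	if (buf != NULL) {
 *		// use up to `amount` bytes of buf ...
 *		logmem_free(lm, buf, amount);
 *	}
 */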