/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <kern/btlog.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/vm_param.h>

/*
 * Since all records are located contiguously in memory,
 * we use indices to them as the primary lookup mechanism,
 * and to maintain the linked list of active records
 * in chronological order.
 */
typedef uint32_t btlog_recordindex_t; /* only 24 bits used */
#define BTLOG_RECORDINDEX_NONE (0xFFFFFF)
#define BTLOG_MAX_RECORDS (0xFFFFFF /* 16777215 */)

typedef struct btlog_record {
    btlog_recordindex_t next:24;
    uint8_t operation;
#if __LP64__
    uint32_t _pad;
#endif
    void *element;
    void *bt[]; /* variable sized, based on btlog_t params */
} btlog_record_t;

struct btlog {
    vm_address_t btlog_buffer;      /* all memory for this btlog_t */
    vm_size_t btlog_buffersize;

    btlog_lock_t lock_callback;     /* caller-provided locking */
    btlog_unlock_t unlock_callback;
    void *callback_context;

    uintptr_t btrecords;            /* use btlog_recordindex_t to lookup */
    size_t btrecord_count;
    size_t btrecord_btdepth;        /* BT entries per record */
    size_t btrecord_size;

    btlog_recordindex_t head;       /* active record list */
    btlog_recordindex_t tail;
    size_t activecount;

    btlog_recordindex_t freelist;
};

extern boolean_t kmem_alloc_ready;

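/*
 * Translate a record index into the address of its slot in the
 * contiguous record array. This is pure pointer arithmetic; callers
 * must check for BTLOG_RECORDINDEX_NONE before dereferencing.
 */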
#define lookup_btrecord(btlog, index) \
    ((btlog_record_t *)(btlog->btrecords + index * btlog->btrecord_size))

btlog_t *
btlog_create(size_t numrecords,
             size_t record_btdepth,
             btlog_lock_t lock_callback,
             btlog_unlock_t unlock_callback,
             void *callback_context)
{
    btlog_t *btlog;
    vm_size_t buffersize_needed;
    vm_address_t buffer = 0;
    size_t i;
    kern_return_t ret;
    size_t btrecord_size;

    if (!kmem_alloc_ready)
        return NULL;

    if (numrecords > BTLOG_MAX_RECORDS)
        return NULL;

    if (numrecords == 0)
        return NULL;

    if (record_btdepth > BTLOG_MAX_DEPTH)
        return NULL;

    if ((lock_callback && !unlock_callback) ||
        (!lock_callback && unlock_callback))
        return NULL;

    /* btlog_record_t is variable-sized, calculate needs now */
    btrecord_size = sizeof(btlog_record_t)
        + sizeof(void *) * record_btdepth;

    buffersize_needed = sizeof(btlog_t) + numrecords * btrecord_size;
    buffersize_needed = round_page(buffersize_needed);

    /* since rounding to a page size might hold more, recalculate */
    numrecords = MIN(BTLOG_MAX_RECORDS,
                     (buffersize_needed - sizeof(btlog_t))/btrecord_size);

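    /*
     * Worked example (illustrative numbers, not guarantees): if
     * btrecord_size came to 136 bytes and 100 records were requested,
     * the unrounded need is sizeof(btlog_t) + 13600 bytes, which
     * round_page() bumps to 16384 on a 4K-page system. That rounded
     * buffer holds (16384 - sizeof(btlog_t)) / 136, roughly 119
     * records, which is why numrecords is recomputed above from the
     * rounded size.
     */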
    ret = kmem_alloc(kernel_map, &buffer, buffersize_needed);
    if (ret != KERN_SUCCESS)
        return NULL;

    btlog = (btlog_t *)buffer;
    btlog->btlog_buffer = buffer;
    btlog->btlog_buffersize = buffersize_needed;

    btlog->lock_callback = lock_callback;
    btlog->unlock_callback = unlock_callback;
    btlog->callback_context = callback_context;

    btlog->btrecords = (uintptr_t)(buffer + sizeof(btlog_t));
    btlog->btrecord_count = numrecords;
    btlog->btrecord_btdepth = record_btdepth;
    btlog->btrecord_size = btrecord_size;

    btlog->head = BTLOG_RECORDINDEX_NONE;
    btlog->tail = BTLOG_RECORDINDEX_NONE;
    btlog->activecount = 0;

    /* populate freelist with all records in order */
    btlog->freelist = 0;
    for (i = 0; i < (numrecords - 1); i++) {
        btlog_record_t *rec = lookup_btrecord(btlog, i);
        rec->next = (btlog_recordindex_t)(i + 1);
    }
    lookup_btrecord(btlog, i)->next = BTLOG_RECORDINDEX_NONE; /* terminate */

    return btlog;
}
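
/*
 * Illustrative usage sketch, not part of the upstream file: how a caller
 * might create a log and record an event. The operation code (1), the
 * record and depth counts, and the NULL lock callbacks (legal because both
 * are NULL, meaning the caller serializes btlog_* calls itself) are
 * assumptions chosen for the example.
 */
#if 0 /* example only, not compiled */
static void
btlog_example_usage(void *element, void *bt[], size_t btcount)
{
    /* 4096 records, up to 15 backtrace frames per record */
    btlog_t *log = btlog_create(4096, 15, NULL, NULL, NULL);

    if (log != NULL) {
        /* record an "operation 1" event against this element */
        btlog_add_entry(log, element, 1, bt, btcount);

        /* later, drop every record that refers to the element */
        btlog_remove_entries_for_element(log, element);
    }
}
#endif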

/* Assumes btlog is already locked */
static btlog_recordindex_t
btlog_get_record_from_freelist(btlog_t *btlog)
{
    btlog_recordindex_t recindex = btlog->freelist;

    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* nothing on freelist */
        return BTLOG_RECORDINDEX_NONE;
    } else {
        /* remove the head of the freelist */
        btlog_record_t *record = lookup_btrecord(btlog, recindex);
        btlog->freelist = record->next;
        return recindex;
    }
}

/* Assumes btlog is already locked */
static btlog_recordindex_t
btlog_evict_record_from_activelist(btlog_t *btlog)
{
    btlog_recordindex_t recindex = btlog->head;

    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* nothing on active list */
        return BTLOG_RECORDINDEX_NONE;
    } else {
        /* remove the head of the active list */
        btlog_record_t *record = lookup_btrecord(btlog, recindex);
        btlog->head = record->next;
        btlog->activecount--;
        if (btlog->head == BTLOG_RECORDINDEX_NONE) {
            /* active list is now empty, update tail */
            btlog->tail = BTLOG_RECORDINDEX_NONE;
        }
        return recindex;
    }
}

/* Assumes btlog is already locked */
static void
btlog_append_record_to_activelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
    if (btlog->head == BTLOG_RECORDINDEX_NONE) {
        /* empty active list, update both head and tail */
        btlog->head = btlog->tail = recindex;
    } else {
        btlog_record_t *record = lookup_btrecord(btlog, btlog->tail);
        record->next = recindex;
        btlog->tail = recindex;
    }
    btlog->activecount++;
}

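/*
 * Record one event: the element, an operation code, and up to
 * btrecord_btdepth backtrace frames (extra frames are dropped, missing
 * ones are zero-filled). If no free record is available, the oldest
 * active record is recycled, so the log retains only the most recent
 * events.
 */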
void
btlog_add_entry(btlog_t *btlog,
                void *element,
                uint8_t operation,
                void *bt[],
                size_t btcount)
{
    btlog_recordindex_t recindex;
    btlog_record_t *record;
    size_t i;

    if (btlog->lock_callback)
        btlog->lock_callback(btlog->callback_context);

    /* If there's a free record, use it */
    recindex = btlog_get_record_from_freelist(btlog);
    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* Use the first active record (FIFO age-out) */
        recindex = btlog_evict_record_from_activelist(btlog);
        assert(recindex != BTLOG_RECORDINDEX_NONE);
    }

    record = lookup_btrecord(btlog, recindex);

    /* we always add to the tail, so there is no next pointer */
    record->next = BTLOG_RECORDINDEX_NONE;
    record->operation = operation;
    record->element = element;
    for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
        record->bt[i] = bt[i];
    }
    for (; i < btlog->btrecord_btdepth; i++) {
        record->bt[i] = NULL;
    }

    btlog_append_record_to_activelist(btlog, recindex);

    if (btlog->unlock_callback)
        btlog->unlock_callback(btlog->callback_context);
}

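/*
 * Remove every active record whose element matches, returning those
 * records to the freelist. The walk first advances past any matching
 * records at the head of the active list, then unlinks matches from
 * the middle of the list, and finally fixes up the tail.
 */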
void
btlog_remove_entries_for_element(btlog_t *btlog,
                                 void *element)
{
    btlog_recordindex_t recindex;
    btlog_record_t *record;

    if (btlog->lock_callback)
        btlog->lock_callback(btlog->callback_context);

    /*
     * Since the btlog_t anchors the active
     * list with a pointer to the head of
     * the list, first loop making sure
     * the head is correct (and doesn't
     * match the element being removed).
     */
    recindex = btlog->head;
    record = lookup_btrecord(btlog, recindex);
    while (recindex != BTLOG_RECORDINDEX_NONE) {
        if (record->element == element) {
            /* remove head of active list */
            btlog->head = record->next;
            btlog->activecount--;

            /* add to freelist */
            record->next = btlog->freelist;
            btlog->freelist = recindex;

            /* check the new head */
            recindex = btlog->head;
            record = lookup_btrecord(btlog, recindex);
        } else {
            /* head didn't match, so we can move on */
            break;
        }
    }

    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* we iterated over the entire active list removing the element */
        btlog->tail = BTLOG_RECORDINDEX_NONE;
    } else {
        /* the head of the active list is stable, now remove other entries */
        btlog_recordindex_t precindex = recindex;
        btlog_record_t *precord = record;

        recindex = precord->next;
        record = lookup_btrecord(btlog, recindex);
        while (recindex != BTLOG_RECORDINDEX_NONE) {
            if (record->element == element) {
                /* remove in place */
                precord->next = record->next;
                btlog->activecount--;

                /* add to freelist */
                record->next = btlog->freelist;
                btlog->freelist = recindex;

                /* check the next record */
                recindex = precord->next;
                record = lookup_btrecord(btlog, recindex);
            } else {
                /* check the next record */
                precindex = recindex;
                precord = record;

                recindex = record->next;
                record = lookup_btrecord(btlog, recindex);
            }
        }

        /* We got to the end of the active list. Update the tail */
        btlog->tail = precindex;
    }

    if (btlog->unlock_callback)
        btlog->unlock_callback(btlog->callback_context);
}