/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <kern/btlog.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <mach/vm_param.h>
#define _SYS_TYPES_H_
#include <libkern/crypto/md5.h>
#include <libkern/crypto/crypto_internal.h>

/*
 * Since all records are located contiguously in memory,
 * we use indices to them as the primary lookup mechanism,
 * and to maintain the linked list of active records
 * in chronological order.
 */
#define BTLOG_MAX_RECORDS (0xFFFFFF /* 16777215 */ )
#define BTLOG_RECORDINDEX_NONE (0xFFFFFF)

/*
 * Each record is a stack with a reference count and a list of
 * log elements that refer to it.
 *
 * Each log element is placed in a hash bucket that is contained
 * within the btlog structure. It holds the index of the record
 * that it references.
 *
 * To go from an address to the corresponding stack: hash the address,
 * find the hash head, and walk the chain of log elements until you find
 * an element whose address matches yours (if one exists); otherwise a
 * new element is created to hold the new address.
 */

#define ELEMENT_HASH_BUCKET_COUNT (256)
#define BTLOG_HASHELEMINDEX_NONE BTLOG_RECORDINDEX_NONE

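/*
 * Illustrative sketch of that lookup path, written against the LEAKS-mode hash
 * table (explanatory only, not used by the code below; names match the
 * definitions that follow, error handling omitted):
 *
 *	uint32_t hashidx = calculate_hashidx_for_element((uintptr_t)addr, btlog);
 *	btlog_element_t *e = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
 *
 *	while (e != NULL && ~e->elem != (uintptr_t)addr) {
 *		e = TAILQ_NEXT(e, element_hash_link);
 *	}
 *	if (e != NULL) {
 *		btlog_record_t *rec = lookup_btrecord(btlog, e->recindex);
 *		// rec->bt[] holds the backtrace recorded for addr
 *	}
 *
 * Note that elements store the one's complement of the address (see
 * btlog_add_entry()), hence the ~e->elem comparison.
 */
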
#define ZELEMS_DEFAULT (8000)
size_t zelems_count = 0;

typedef uint32_t btlog_recordindex_t; /* only 24 bits used */

/*
 * Queue head for the queue of elements connected to a particular record (stack).
 * Allows quick removal of the oldest element referencing the least popular stack. Useful for LEAKS mode.
 */
TAILQ_HEAD(_element_record_queue, btlog_element);

/*
 * Queue head for the queue of elements that hash to the same bucket.
 * Allows quick removal of the oldest element ever logged. Useful for CORRUPTION mode, where we use only one bucket, i.e. FIFO.
 */
TAILQ_HEAD(_element_hash_queue, btlog_element);

typedef struct btlog_record {
	btlog_recordindex_t next:24,
	    operation:8;
	uint32_t ref_count;
	uint32_t bthash;
	struct _element_record_queue element_record_queue;
	void *bt[]; /* variable sized, based on btlog_t params */
} btlog_record_t;

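/*
 * Sizing note (explanatory only): bt[] is a flexible array member, so each
 * record occupies
 *
 *	btrecord_size = sizeof(btlog_record_t) + sizeof(void *) * record_btdepth;
 *
 * exactly as computed in btlog_create() below. For example, with a requested
 * depth of 15 frames on a 64-bit kernel, each record carries 15 * 8 = 120
 * bytes of backtrace storage after the fixed header. Records are addressed by
 * btlog_recordindex_t indices into one contiguous buffer, never by pointers
 * held across calls.
 */
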
typedef struct btlog_element {
	btlog_recordindex_t recindex:24,
	    operation:8;
	uintptr_t elem;
	TAILQ_ENTRY(btlog_element) element_record_link; /* Links to other elements pointing to the same stack. */

	TAILQ_ENTRY(btlog_element) element_hash_link; /* Links to other elements in the same hash chain.
	                                               * During LEAKS mode, this is used as a singly-linked list because
	                                               * we don't want to initialize ELEMENT_HASH_BUCKET_COUNT heads.
	                                               *
	                                               * During CORRUPTION mode with a single hash chain, this is used as a doubly-linked list.
	                                               */
} btlog_element_t;

struct btlog {
	vm_address_t btlog_buffer;        /* all memory for this btlog_t */
	vm_size_t btlog_buffersize;

	uintptr_t btrecords;              /* use btlog_recordindex_t to lookup */
	size_t btrecord_btdepth;          /* BT entries per record */
	size_t btrecord_size;

	btlog_recordindex_t head;         /* active record list */
	btlog_recordindex_t tail;
	btlog_recordindex_t activerecord;
	btlog_recordindex_t freelist_records;

	size_t active_record_count;
	size_t active_element_count;
	btlog_element_t *freelist_elements;
	union {
		btlog_element_t **elem_recindex_hashtbl;        /* LEAKS mode: an array of ELEMENT_HASH_BUCKET_COUNT buckets. */
		struct _element_hash_queue *element_hash_queue; /* CORRUPTION mode: a single hash bucket, i.e. a queue. */
	} elem_linkage_un;

	decl_simple_lock_data(, btlog_lock);
	boolean_t caller_will_remove_entries_for_element; /* If TRUE, the caller wants to track abandoned / leaked elements
	                                                   * and takes charge of explicitly removing them. This flag decides
	                                                   * which data structure in the elem_linkage_un union above is used.
	                                                   */
};

extern boolean_t vm_kernel_ready;
extern boolean_t kmem_alloc_ready;

#define lookup_btrecord(btlog, index) \
	((btlog_record_t *)(btlog->btrecords + index * btlog->btrecord_size))

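/*
 * Explanatory note: lookup_btrecord() is plain offset arithmetic into the
 * contiguous record area that follows the btlog_t header in the same
 * allocation (see btlog_create() below):
 *
 *	btlog->btrecords = (uintptr_t)(buffer + sizeof(btlog_t));
 *	record_N = (btlog_record_t *)(btlog->btrecords + N * btlog->btrecord_size);
 *
 * Index 0 is therefore the first record after the header. All call sites pass
 * a plain variable for 'index'; the macro does not parenthesize its arguments.
 */
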
uint32_t calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog);
uint32_t lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount);

void btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *hash_elem);
btlog_element_t* btlog_get_elem_from_freelist(btlog_t *btlog);

uint32_t
lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount)
{
	btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *record = NULL;
	size_t i = 0;
	boolean_t stack_matched = TRUE;

	assert(btcount);
	assert(bt);

	recindex = btlog->head;
	record = lookup_btrecord(btlog, recindex);
	while (recindex != BTLOG_RECORDINDEX_NONE) {
		assert(!TAILQ_EMPTY(&record->element_record_queue));
		if (record->bthash == md5_hash) {
			/*
			 * Make sure that the incoming stack actually matches the
			 * stack in this record. Since we only save off a
			 * part of the md5 hash there can be collisions sometimes.
			 * This comparison isn't costly because, in case of collisions,
			 * usually the first few frames are different.
			 */

			stack_matched = TRUE;

			if (btcount < btlog->btrecord_btdepth) {
				if (record->bt[btcount] != NULL) {
					/*
					 * If the stack depth passed in is smaller than
					 * the recorded stack and we have a valid pointer
					 * in the recorded stack at that depth, then we
					 * don't need to do any further checks.
					 */
					stack_matched = FALSE;
					goto next;
				}
			}

			for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
				if (record->bt[i] != bt[i]) {
					stack_matched = FALSE;
					goto next;
				}
			}

			if (stack_matched == TRUE) {
				break;
			}
		}
next:
		recindex = record->next;
		record = lookup_btrecord(btlog, recindex);
	}

	return recindex;
}

uint32_t
calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog)
{
	if (btlog->caller_will_remove_entries_for_element) {
		uint32_t addr = 0;

		addr = (uint32_t) ((elem & 0xFF00) >> 0x8);

		return addr;
	} else {
		return 0;
	}
}

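/*
 * Worked example (explanatory only): in LEAKS mode the hash index is byte 1
 * of the element's address (bits 8..15), which spreads elements across the
 * ELEMENT_HASH_BUCKET_COUNT (256) buckets:
 *
 *	elem    = 0xffffff80ab34cd10        (an illustrative kernel address)
 *	hashidx = (elem & 0xFF00) >> 8 = 0xcd = 205
 *
 * In CORRUPTION mode every element maps to bucket 0, consistent with the
 * single FIFO hash queue used in that mode.
 */
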
static void
btlog_lock(btlog_t *btlog)
{
	simple_lock(&btlog->btlog_lock, LCK_GRP_NULL);
}
static void
btlog_unlock(btlog_t *btlog)
{
	simple_unlock(&btlog->btlog_lock);
}

btlog_t *
btlog_create(size_t numrecords,
    size_t record_btdepth,
    boolean_t caller_will_remove_entries_for_element)
{
	btlog_t *btlog;
	vm_size_t buffersize_needed = 0, elemsize_needed = 0;
	vm_address_t buffer = 0, elem_buffer = 0, elem_hash_buffer = 0;
	size_t i = 0;
	kern_return_t ret;
	size_t btrecord_size = 0;
	uintptr_t free_elem = 0, next_free_elem = 0;

	if (vm_kernel_ready && !kmem_alloc_ready) {
		return NULL;
	}

	if (numrecords > BTLOG_MAX_RECORDS) {
		return NULL;
	}

	if (numrecords == 0) {
		return NULL;
	}

	if (record_btdepth > BTLOG_MAX_DEPTH) {
		return NULL;
	}

	/* btlog_record_t is variable-sized, calculate needs now */
	btrecord_size = sizeof(btlog_record_t)
	    + sizeof(void *) * record_btdepth;

	buffersize_needed = sizeof(btlog_t) + numrecords * btrecord_size;
	buffersize_needed = round_page(buffersize_needed);

	if (zelems_count == 0) {
		zelems_count = ((max_mem + (1024 * 1024 * 1024) /*GB*/) >> 30) * ZELEMS_DEFAULT;

		if (PE_parse_boot_argn("zelems", &zelems_count, sizeof(zelems_count)) == TRUE) {
			/*
			 * Need a max? With this scheme, it should be possible to tune the default
			 * so that we don't need a boot-arg to request more elements.
			 */
			printf("Set number of log elements per btlog to: %ld\n", zelems_count);
		}
	}
	elemsize_needed = sizeof(btlog_element_t) * zelems_count;
	elemsize_needed = round_page(elemsize_needed);

	/* since rounding to a page size might hold more, recalculate */
	numrecords = MIN(BTLOG_MAX_RECORDS,
	    (buffersize_needed - sizeof(btlog_t)) / btrecord_size);

	if (kmem_alloc_ready) {
		ret = kmem_alloc(kernel_map, &buffer, buffersize_needed, VM_KERN_MEMORY_DIAG);
		if (ret != KERN_SUCCESS) {
			return NULL;
		}

		ret = kmem_alloc(kernel_map, &elem_buffer, elemsize_needed, VM_KERN_MEMORY_DIAG);
		if (ret != KERN_SUCCESS) {
			kmem_free(kernel_map, buffer, buffersize_needed);
			buffer = 0;
			return NULL;
		}

		if (caller_will_remove_entries_for_element == TRUE) {
			ret = kmem_alloc(kernel_map, &elem_hash_buffer, ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
		} else {
			ret = kmem_alloc(kernel_map, &elem_hash_buffer, 2 * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
		}

		if (ret != KERN_SUCCESS) {
			kmem_free(kernel_map, buffer, buffersize_needed);
			buffer = 0;

			kmem_free(kernel_map, elem_buffer, elemsize_needed);
			elem_buffer = 0;
			return NULL;
		}
	} else {
		buffer = (vm_address_t)pmap_steal_memory(buffersize_needed);
		elem_buffer = (vm_address_t)pmap_steal_memory(elemsize_needed);
		if (caller_will_remove_entries_for_element == TRUE) {
			elem_hash_buffer = (vm_address_t)pmap_steal_memory(ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*));
		} else {
			elem_hash_buffer = (vm_address_t)pmap_steal_memory(2 * sizeof(btlog_element_t*));
		}
		ret = KERN_SUCCESS;
	}

	btlog = (btlog_t *)buffer;
	btlog->btlog_buffer = buffer;
	btlog->btlog_buffersize = buffersize_needed;
	btlog->freelist_elements = (btlog_element_t *)elem_buffer;

	simple_lock_init(&btlog->btlog_lock, 0);

	btlog->caller_will_remove_entries_for_element = caller_will_remove_entries_for_element;

	if (caller_will_remove_entries_for_element == TRUE) {
		btlog->elem_linkage_un.elem_recindex_hashtbl = (btlog_element_t **)elem_hash_buffer;
	} else {
		btlog->elem_linkage_un.element_hash_queue = (struct _element_hash_queue*) elem_hash_buffer;
		TAILQ_INIT(btlog->elem_linkage_un.element_hash_queue);
	}

	btlog->btrecords = (uintptr_t)(buffer + sizeof(btlog_t));
	btlog->btrecord_btdepth = record_btdepth;
	btlog->btrecord_size = btrecord_size;

	btlog->head = BTLOG_RECORDINDEX_NONE;
	btlog->tail = BTLOG_RECORDINDEX_NONE;
	btlog->active_record_count = 0;
	btlog->activerecord = BTLOG_RECORDINDEX_NONE;

	for (i = 0; i < ELEMENT_HASH_BUCKET_COUNT; i++) {
		btlog->elem_linkage_un.elem_recindex_hashtbl[i] = 0;
	}

	/* populate freelist_records with all records in order */
	btlog->freelist_records = 0;
	for (i = 0; i < (numrecords - 1); i++) {
		btlog_record_t *rec = lookup_btrecord(btlog, i);
		rec->next = (btlog_recordindex_t)(i + 1);
	}
	lookup_btrecord(btlog, i)->next = BTLOG_RECORDINDEX_NONE; /* terminate */

	/* populate freelist_elements with all elements in order */
	free_elem = (uintptr_t)btlog->freelist_elements;

	for (i = 0; i < (zelems_count - 1); i++) {
		next_free_elem = free_elem + sizeof(btlog_element_t);
		*(uintptr_t*)free_elem = next_free_elem;
		free_elem = next_free_elem;
	}
	*(uintptr_t*)next_free_elem = BTLOG_HASHELEMINDEX_NONE;

	return btlog;
}

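/*
 * Usage sketch (explanatory only; the record count, depth, and variable names
 * are illustrative, not taken from a real call site): a subsystem that wants
 * allocation backtraces, such as the zone logging code, creates a log once
 * and then feeds it from its alloc/free paths:
 *
 *	btlog_t *log;
 *
 *	// Pass TRUE when the caller will explicitly remove entries on free
 *	// (LEAKS-style usage); pass FALSE for a FIFO, CORRUPTION-style log.
 *	log = btlog_create(4000, 15, TRUE);
 *	if (log == NULL) {
 *		// creation can fail early in boot or on bad parameters
 *	}
 */
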
/* Assumes btlog is already locked */
static btlog_recordindex_t
btlog_get_record_from_freelist(btlog_t *btlog)
{
	btlog_recordindex_t recindex = btlog->freelist_records;

	if (recindex == BTLOG_RECORDINDEX_NONE) {
		/* nothing on freelist */
		return BTLOG_RECORDINDEX_NONE;
	} else {
		/* remove the head of the freelist_records */
		btlog_record_t *record = lookup_btrecord(btlog, recindex);
		btlog->freelist_records = record->next;
		return recindex;
	}
}

static void
btlog_add_record_to_freelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
	btlog_recordindex_t precindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *precord = NULL, *record = NULL;

	record = lookup_btrecord(btlog, recindex);

	assert(TAILQ_EMPTY(&record->element_record_queue));

	record->bthash = 0;

	precindex = btlog->head;
	precord = lookup_btrecord(btlog, precindex);

	if (precindex == recindex) {
		btlog->head = precord->next;
		btlog->active_record_count--;

		record->next = btlog->freelist_records;
		btlog->freelist_records = recindex;

		if (btlog->head == BTLOG_RECORDINDEX_NONE) {
			/* active list is now empty, update tail */
			btlog->tail = BTLOG_RECORDINDEX_NONE;
			assert(btlog->active_record_count == 0);
		}
	} else {
		while (precindex != BTLOG_RECORDINDEX_NONE) {
			if (precord->next == recindex) {
				precord->next = record->next;
				btlog->active_record_count--;

				record->next = btlog->freelist_records;
				btlog->freelist_records = recindex;

				if (btlog->tail == recindex) {
					btlog->tail = precindex;
				}
				break;
			} else {
				precindex = precord->next;
				precord = lookup_btrecord(btlog, precindex);
			}
		}
	}
}

/* Assumes btlog is already locked */
static void
btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict)
{
	btlog_recordindex_t recindex = btlog->head;
	btlog_record_t *record = NULL;
	btlog_element_t *recelem = NULL;

	if (recindex == BTLOG_RECORDINDEX_NONE) {
		/* nothing on active list */
		panic("BTLog: Eviction requested on btlog (0x%lx) with an empty active list.\n", (uintptr_t) btlog);
	} else {
		while (num_elements_to_evict) {
			/*
			 * LEAKS: reap the oldest element within the record with the lowest refs.
			 * CORRUPTION: reap the oldest element overall and drop its reference on the record
			 */

			if (btlog->caller_will_remove_entries_for_element) {
				uint32_t max_refs_threshold = UINT32_MAX;
				btlog_recordindex_t precindex = 0, prev_evictindex = 0, evict_index = 0;

				prev_evictindex = evict_index = btlog->head;
				precindex = recindex = btlog->head;

				while (recindex != BTLOG_RECORDINDEX_NONE) {
					record = lookup_btrecord(btlog, recindex);

					if (btlog->activerecord == recindex || record->ref_count > max_refs_threshold) {
						/* skip this record */
					} else {
						prev_evictindex = precindex;
						evict_index = recindex;
						max_refs_threshold = record->ref_count;
					}

					if (record->next != BTLOG_RECORDINDEX_NONE) {
						precindex = recindex;
					}

					recindex = record->next;
				}

				recindex = evict_index;
				assert(recindex != BTLOG_RECORDINDEX_NONE);
				record = lookup_btrecord(btlog, recindex);

				recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
			} else {
				recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
				recindex = recelem->recindex;
				record = lookup_btrecord(btlog, recindex);
			}

			/*
			 * Here we have the element to drop (recelem), its record and the record index.
			 */

			while (recelem && num_elements_to_evict) {
				TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

				if (btlog->caller_will_remove_entries_for_element) {
					btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;
					uint32_t hashidx = 0;

					hashidx = calculate_hashidx_for_element(~recelem->elem, btlog);

					prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
					while (hashelem != NULL) {
						if (hashelem == recelem) {
							break;
						} else {
							prev_hashelem = hashelem;
							hashelem = TAILQ_NEXT(hashelem, element_hash_link);
						}
					}

					if (hashelem == NULL) {
						panic("BTLog: Missing hashelem for element list of record 0x%lx\n", (uintptr_t) record);
					}

					if (prev_hashelem != hashelem) {
						TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
					} else {
						btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
					}
				} else {
					TAILQ_REMOVE(btlog->elem_linkage_un.element_hash_queue, recelem, element_hash_link);
				}

				btlog_add_elem_to_freelist(btlog, recelem);
				btlog->active_element_count--;

				num_elements_to_evict--;

				assert(record->ref_count);

				record->ref_count--;

				if (record->ref_count == 0) {
					btlog_add_record_to_freelist(btlog, recindex);

					/*
					 * LEAKS: All done with this record. Need the next least popular record.
					 * CORRUPTION: We don't care about records. We'll just pick the next oldest element.
					 */

					if (btlog->caller_will_remove_entries_for_element) {
						break;
					}
				}

				if (btlog->caller_will_remove_entries_for_element) {
					recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
				} else {
					recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
					recindex = recelem->recindex;
					record = lookup_btrecord(btlog, recindex);
				}
			}
		}
	}
}

/* Assumes btlog is already locked */
static void
btlog_append_record_to_activelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
	assert(recindex != BTLOG_RECORDINDEX_NONE);

	if (btlog->head == BTLOG_RECORDINDEX_NONE) {
		/* empty active list, update both head and tail */
		btlog->head = btlog->tail = recindex;
	} else {
		btlog_record_t *record = lookup_btrecord(btlog, btlog->tail);
		record->next = recindex;
		btlog->tail = recindex;
	}
	btlog->active_record_count++;
}

btlog_element_t*
btlog_get_elem_from_freelist(btlog_t *btlog)
{
	btlog_element_t *free_elem = NULL;

retry:
	free_elem = btlog->freelist_elements;

	if ((uintptr_t)free_elem == BTLOG_HASHELEMINDEX_NONE) {
		/* nothing on freelist */
		btlog_evict_elements_from_record(btlog, 1);
		goto retry;
	} else {
		/* remove the head of the freelist */
		uintptr_t next_elem = *(uintptr_t*)free_elem;
		btlog->freelist_elements = (btlog_element_t *)next_elem;
		return free_elem;
	}
}

void
btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *elem)
{
	btlog_element_t *free_elem = btlog->freelist_elements;

	TAILQ_NEXT(elem, element_hash_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;
	TAILQ_NEXT(elem, element_record_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;

	*(uintptr_t*)elem = (uintptr_t)free_elem;
	btlog->freelist_elements = elem;
}

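/*
 * Explanatory note on the element freelist encoding: a free btlog_element_t is
 * not linked through its TAILQ fields. Instead its first word is overwritten
 * with the address of the next free element, and the last element's first word
 * holds BTLOG_HASHELEMINDEX_NONE as a terminator, mirroring how btlog_create()
 * threads the initial freelist:
 *
 *	*(uintptr_t *)free_elem = (uintptr_t)next_free_elem;       // link to next
 *	...
 *	*(uintptr_t *)last_free_elem = BTLOG_HASHELEMINDEX_NONE;   // terminator
 *
 * btlog_get_elem_from_freelist() undoes this by reading the first word of the
 * head element to find the new freelist head.
 */
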
void
btlog_add_entry(btlog_t *btlog,
    void *element,
    uint8_t operation,
    void *bt[],
    size_t btcount)
{
	btlog_recordindex_t recindex = 0;
	btlog_record_t *record = NULL;
	size_t i;
	u_int32_t md5_buffer[4];
	MD5_CTX btlog_ctx;
	uint32_t hashidx = 0;

	btlog_element_t *hashelem = NULL;

	if (g_crypto_funcs == NULL) {
		return;
	}

	btlog_lock(btlog);

	MD5Init(&btlog_ctx);
	for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
		MD5Update(&btlog_ctx, (u_char *) &bt[i], sizeof(bt[i]));
	}
	MD5Final((u_char *) &md5_buffer, &btlog_ctx);

	recindex = lookup_btrecord_byhash(btlog, md5_buffer[0], bt, btcount);

	if (recindex != BTLOG_RECORDINDEX_NONE) {
		record = lookup_btrecord(btlog, recindex);
		record->ref_count++;
		assert(record->operation == operation);
	} else {
retry:
		/* If there's a free record, use it */
		recindex = btlog_get_record_from_freelist(btlog);
		if (recindex == BTLOG_RECORDINDEX_NONE) {
			/* Use the first active record (FIFO age-out) */
			btlog_evict_elements_from_record(btlog, ((2 * sizeof(btlog_record_t)) / sizeof(btlog_element_t)));
			goto retry;
		}

		record = lookup_btrecord(btlog, recindex);

		/* we always add to the tail, so there is no next pointer */
		record->next = BTLOG_RECORDINDEX_NONE;
		record->operation = operation;
		record->bthash = md5_buffer[0];
		record->ref_count = 1;
		TAILQ_INIT(&record->element_record_queue);

		for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
			record->bt[i] = bt[i];
		}

		for (; i < btlog->btrecord_btdepth; i++) {
			record->bt[i] = NULL;
		}

		btlog_append_record_to_activelist(btlog, recindex);
	}

	btlog->activerecord = recindex;

	hashidx = calculate_hashidx_for_element((uintptr_t)element, btlog);
	hashelem = btlog_get_elem_from_freelist(btlog);

	hashelem->elem = ~((uintptr_t)element);
	hashelem->operation = record->operation;
	hashelem->recindex = recindex;

	TAILQ_INSERT_HEAD(&record->element_record_queue, hashelem, element_record_link);

	if (btlog->caller_will_remove_entries_for_element) {
		TAILQ_NEXT(hashelem, element_hash_link) = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
		btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = hashelem;
	} else {
		TAILQ_INSERT_HEAD(btlog->elem_linkage_un.element_hash_queue, hashelem, element_hash_link);
	}

	btlog->active_element_count++;

	btlog->activerecord = BTLOG_RECORDINDEX_NONE;

	btlog_unlock(btlog);
}

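/*
 * Usage sketch (explanatory only): a caller captures a backtrace of its own
 * allocation path and logs it with the element's address and an operation tag.
 * Both capture_caller_backtrace() and MY_OP_ALLOC below are hypothetical
 * placeholders for whatever facility and tag the caller actually uses:
 *
 *	#define MY_OP_ALLOC 1
 *
 *	void *bt[15];
 *	size_t btcount;
 *
 *	btcount = capture_caller_backtrace(bt, 15);   // hypothetical helper
 *	btlog_add_entry(log, element, MY_OP_ALLOC, bt, btcount);
 *
 * Identical stacks are deduplicated: the frames are hashed with MD5, the
 * matching btlog_record_t is found (or a new one is taken from the record
 * freelist), its ref_count is bumped, and a fresh btlog_element_t for
 * 'element' is linked onto that record and into the hash structure.
 */
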
void
btlog_remove_entries_for_element(btlog_t *btlog,
    void *element)
{
	btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *record = NULL;
	uint32_t hashidx = 0;

	btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;

	if (btlog->caller_will_remove_entries_for_element == FALSE) {
		panic("Explicit removal of entry is not permitted for this btlog (%p).\n", btlog);
	}

	if (g_crypto_funcs == NULL) {
		return;
	}

	btlog_lock(btlog);

	hashidx = calculate_hashidx_for_element((uintptr_t) element, btlog);
	prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];

	while (hashelem != NULL) {
		if (~hashelem->elem == (uintptr_t)element) {
			break;
		} else {
			prev_hashelem = hashelem;
			hashelem = TAILQ_NEXT(hashelem, element_hash_link);
		}
	}

	if (hashelem) {
		btlog_element_t *recelem = NULL;

		if (prev_hashelem != hashelem) {
			TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
		} else {
			btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
		}

		recindex = hashelem->recindex;
		record = lookup_btrecord(btlog, recindex);

		recelem = hashelem;
		TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

		btlog_add_elem_to_freelist(btlog, hashelem);
		btlog->active_element_count--;

		assert(record->ref_count);

		record->ref_count--;

		if (record->ref_count == 0) {
			btlog_add_record_to_freelist(btlog, recindex);
		}
	}

	btlog_unlock(btlog);
}

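/*
 * Pairing note (explanatory only): for LEAKS-style logs
 * (caller_will_remove_entries_for_element == TRUE), each btlog_add_entry() at
 * allocation time should be paired with btlog_remove_entries_for_element() at
 * free time, so that the entries remaining in the log describe still-live or
 * leaked elements:
 *
 *	// on alloc
 *	btlog_add_entry(log, elem, MY_OP_ALLOC, bt, btcount);   // see sketch above
 *	// on free
 *	btlog_remove_entries_for_element(log, elem);
 *
 * Calling the removal function on a CORRUPTION-style log panics, as enforced
 * at the top of the function above.
 */
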
#if DEBUG || DEVELOPMENT

void
btlog_copy_backtraces_for_elements(btlog_t * btlog,
    uintptr_t * instances,
    uint32_t * countp,
    uint32_t zoneSize,
    leak_site_proc proc,
    void * refCon)
{
	btlog_recordindex_t recindex;
	btlog_record_t * record;
	btlog_element_t * hashelem;
	uint32_t hashidx, idx, dups, numSites, siteCount;
	uintptr_t element, site;
	uint32_t count;

	btlog_lock(btlog);

	count = *countp;
	for (numSites = 0, idx = 0; idx < count; idx++) {
		element = instances[idx];

		if (kInstanceFlagReferenced & element) {
			continue;
		}
		element = INSTANCE_PUT(element) & ~kInstanceFlags;

		site = 0;
		hashidx = calculate_hashidx_for_element(element, btlog);
		hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
		while (hashelem != NULL) {
			if (~hashelem->elem == element) {
				break;
			}
			hashelem = TAILQ_NEXT(hashelem, element_hash_link);
		}
		if (hashelem) {
			recindex = hashelem->recindex;
			site = (uintptr_t) lookup_btrecord(btlog, recindex);
		}
		if (site) {
			element = (site | kInstanceFlagReferenced);
		}
		instances[numSites] = INSTANCE_PUT(element);
		numSites++;
	}

	for (idx = 0; idx < numSites; idx++) {
		site = instances[idx];
		if (!site) {
			continue;
		}
		if (!(kInstanceFlagReferenced & site)) {
			continue;
		}
		for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
			if (instances[dups] == site) {
				siteCount++;
				instances[dups] = 0;
			}
		}
		record = (typeof(record))(INSTANCE_PUT(site) & ~kInstanceFlags);
		(*proc)(refCon, siteCount, zoneSize, (uintptr_t *) &record->bt[0], (uint32_t) btlog->btrecord_btdepth);
	}

	*countp = numSites;

	btlog_unlock(btlog);
}

/*
 * Returns the number of records in the btlog struct.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
size_t
get_btlog_records_count(btlog_t *btlog)
{
	if (btlog->btlog_buffersize < sizeof(btlog_t)) {
		return 0;
	}
	return (btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size;
}

/*
 * Copies out relevant info from btlog_record_t's to zone_btrecord_t's. 'numrecs' points to the number of records
 * the 'records' buffer can hold. Upon return 'numrecs' points to the number of records actually copied out.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
void
get_btlog_records(btlog_t *btlog, zone_btrecord_t *records, unsigned int *numrecs)
{
	unsigned int count, recs_copied, frame;
	zone_btrecord_t *current_rec;
	btlog_record_t *zstack_record;
	btlog_recordindex_t zstack_index = BTLOG_RECORDINDEX_NONE;

	btlog_lock(btlog);

	count = 0;
	if (btlog->btlog_buffersize > sizeof(btlog_t)) {
		count = (unsigned int)((btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size);
	}
	/* Copy out only as many records as the pre-allocated buffer size permits. */
	if (count > *numrecs) {
		count = *numrecs;
	}
	zstack_index = btlog->head;

	current_rec = &records[0];
	recs_copied = 0;
	while (recs_copied < count && (zstack_index != BTLOG_RECORDINDEX_NONE)) {
		zstack_record = lookup_btrecord(btlog, zstack_index);
		current_rec->operation_type = (uint32_t)(zstack_record->operation);
		current_rec->ref_count = zstack_record->ref_count;

		frame = 0;
		while (frame < MIN(btlog->btrecord_btdepth, MAX_ZTRACE_DEPTH)) {
			current_rec->bt[frame] = (uint64_t)VM_KERNEL_UNSLIDE(zstack_record->bt[frame]);
			frame++;
		}

		zstack_index = zstack_record->next;
		recs_copied++;
		current_rec++;
	}
	*numrecs = recs_copied;

	btlog_unlock(btlog);
}

#endif /* DEBUG || DEVELOPMENT */