/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/btlog.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/vm_param.h>
#include <libkern/crypto/md5.h>
#include <libkern/crypto/crypto_internal.h>
/*
 * Since all records are located contiguously in memory,
 * we use indices to them as the primary lookup mechanism,
 * and to maintain the linked list of active records
 * in chronological order.
 */
#define BTLOG_MAX_RECORDS	(0xFFFFFF /* 16777215 */)
#define BTLOG_RECORDINDEX_NONE	(0xFFFFFF)
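
/*
 * Illustrative sketch (not part of the original source): because records
 * are laid out contiguously after the btlog header, a 24-bit index is
 * enough to recover a record's address with plain arithmetic:
 *
 *	btlog_record_t *rec = (btlog_record_t *)
 *	    (btlog->btrecords + recindex * btlog->btrecord_size);
 *
 * The lookup_btrecord() macro defined below does exactly this.
 */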
/*
 * Each record is a stack with a reference count and a list of
 * log elements that refer to it.
 *
 * Each log element is placed in a hash bucket that is contained
 * within the btlog structure. It contains the index to the record
 * that it references.
 *
 * So you can go from an address to the corresp. stack by hashing the address,
 * finding the hash head and traversing the chain of log elements
 * till you find the hash bucket with an address that matches your
 * address (if it exists) or creating a new bucket to hold this new address.
 */
#define ELEMENT_HASH_BUCKET_COUNT	(256)
#define BTLOG_HASHELEMINDEX_NONE	BTLOG_RECORDINDEX_NONE
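
/*
 * Illustrative sketch (not part of the original source): resolving an
 * element address to its recorded stack is a hash-chain walk, roughly:
 *
 *	uint32_t idx = calculate_hashidx_for_element((uintptr_t)addr, btlog);
 *	btlog_element_t *e = btlog->elem_linkage_un.elem_recindex_hashtbl[idx];
 *	while (e != NULL && ~e->elem != (uintptr_t)addr)
 *		e = TAILQ_NEXT(e, element_hash_link);
 *
 * If found, e->recindex identifies the stack record. Element addresses are
 * stored bit-flipped, hence the '~'.
 */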
#define ZELEMS_DEFAULT	(8000)
size_t zelems_count = 0;

typedef uint32_t btlog_recordindex_t; /* only 24 bits used */
/*
 * Queue head for the queue of elements connected to a particular record (stack).
 * For quick removal of the oldest element referencing the least popular stack. Useful for LEAKS mode.
 */
TAILQ_HEAD(_element_record_queue, btlog_element);

/*
 * Queue head for the queue of elements that hash to the same bucket.
 * For quick removal of the oldest element ever logged. Useful for CORRUPTION mode where we use only one bucket i.e. FIFO.
 */
TAILQ_HEAD(_element_hash_queue, btlog_element);
typedef struct btlog_record {
	btlog_recordindex_t	next:24,
				operation:8;
	uint32_t		ref_count;
	uint32_t		bthash;
	struct _element_record_queue	element_record_queue;
	void			*bt[]; /* variable sized, based on btlog_t params */
} btlog_record_t;
typedef struct btlog_element {
	btlog_recordindex_t	recindex:24,
				operation:8;
	uintptr_t		elem;
	TAILQ_ENTRY(btlog_element) element_record_link; /* Links to other elements pointing to the same stack. */

	TAILQ_ENTRY(btlog_element) element_hash_link;	/* Links to other elements in the same hash chain.
							 * During LEAKS mode, this is used as a singly-linked list because
							 * we don't want to initialize ELEMENT_HASH_BUCKET_COUNT heads.
							 *
							 * During CORRUPTION mode with a single hash chain, this is used as a doubly-linked list.
							 */
} btlog_element_t;
struct btlog {
	vm_address_t	btlog_buffer;		/* all memory for this btlog_t */
	vm_size_t	btlog_buffersize;

	uintptr_t	btrecords;		/* use btlog_recordindex_t to lookup */
	size_t		btrecord_btdepth;	/* BT entries per record */
	size_t		btrecord_size;

	btlog_recordindex_t	head;		/* active record list */
	btlog_recordindex_t	tail;
	btlog_recordindex_t	activerecord;
	btlog_recordindex_t	freelist_records;

	size_t		active_record_count;
	size_t		active_element_count;
	btlog_element_t	*freelist_elements;

	union {
		btlog_element_t		**elem_recindex_hashtbl;	/* LEAKS mode: We use an array of ELEMENT_HASH_BUCKET_COUNT buckets. */
		struct _element_hash_queue	*element_hash_queue;	/* CORRUPTION mode: We use a single hash bucket i.e. queue */
	} elem_linkage_un;

	decl_simple_lock_data(, btlog_lock);
	boolean_t	caller_will_remove_entries_for_element;	/* If TRUE, this means that the caller is interested in keeping track of abandoned / leaked elements.
								 * And so they want to be in charge of explicitly removing elements. Depending on this variable we
								 * will choose what kind of data structure to use for the elem_linkage_un union above.
								 */
};
extern boolean_t vm_kernel_ready;
extern boolean_t kmem_alloc_ready;

#define lookup_btrecord(btlog, index) \
	((btlog_record_t *)(btlog->btrecords + index * btlog->btrecord_size))
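
/*
 * Example (illustrative, not from the original source): the functions below
 * walk the active record list with this macro, along the lines of:
 *
 *	btlog_recordindex_t ri = btlog->head;
 *	while (ri != BTLOG_RECORDINDEX_NONE) {
 *		btlog_record_t *r = lookup_btrecord(btlog, ri);
 *		ri = r->next;
 *	}
 */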
uint32_t calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog);
uint32_t lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount);

void btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *hash_elem);
btlog_element_t *btlog_get_elem_from_freelist(btlog_t *btlog);
uint32_t
lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount)
{
	btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *record = NULL;
	size_t i = 0;
	boolean_t stack_matched = TRUE;

	assert(btcount);
	assert(bt);

	recindex = btlog->head;
	record = lookup_btrecord(btlog, recindex);
	while (recindex != BTLOG_RECORDINDEX_NONE) {
		assert(record->bthash);
		assert(!TAILQ_EMPTY(&record->element_record_queue));
		if (record->bthash == md5_hash) {
			/*
			 * Make sure that the incoming stack actually matches the
			 * stack in this record. Since we only save off a
			 * part of the md5 hash there can be collisions sometimes.
			 * This comparison isn't costly because, in case of collisions,
			 * usually the first few frames are different.
			 */

			stack_matched = TRUE;

			if (btcount < btlog->btrecord_btdepth) {
				if (record->bt[btcount] != NULL) {
					/*
					 * If the stack depth passed in is smaller than
					 * the recorded stack and we have a valid pointer
					 * in the recorded stack at that depth, then we
					 * don't need to do any further checks.
					 */
					stack_matched = FALSE;
					goto next;
				}
			}

			for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
				if (record->bt[i] != bt[i]) {
					stack_matched = FALSE;
					goto next;
				}
			}

			if (stack_matched == TRUE) {
				break;
			}
		}
next:
		recindex = record->next;
		record = lookup_btrecord(btlog, recindex);
	}

	return recindex;
}
uint32_t
calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog)
{
	if (btlog->caller_will_remove_entries_for_element) {
		uint32_t addr = 0;

		addr = (uint32_t)((elem & 0xFF00) >> 0x8);

		return addr;
	} else {
		return 0;
	}
}
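
/*
 * Note (an assumption, not stated in the original source): the hash above
 * takes bits 8-15 of the element address, spreading elements over the
 * ELEMENT_HASH_BUCKET_COUNT (256) buckets used in LEAKS mode, e.g.:
 *
 *	elem == 0xffffff80025a4e80
 *	(elem & 0xFF00) >> 0x8 == 0x4e (bucket 78)
 *
 * Bits 0-7 are presumably skipped because allocation alignment leaves them
 * with little entropy. CORRUPTION mode always returns bucket 0, since it
 * keeps a single FIFO queue instead of a hash table.
 */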
static void
btlog_lock(btlog_t *btlog)
{
	simple_lock(&btlog->btlog_lock);
}

static void
btlog_unlock(btlog_t *btlog)
{
	simple_unlock(&btlog->btlog_lock);
}
btlog_t *
btlog_create(size_t numrecords,
    size_t record_btdepth,
    boolean_t caller_will_remove_entries_for_element)
{
	btlog_t *btlog;
	vm_size_t buffersize_needed = 0, elemsize_needed = 0;
	vm_address_t buffer = 0, elem_buffer = 0, elem_hash_buffer = 0;
	size_t i = 0;
	kern_return_t ret;
	size_t btrecord_size = 0;
	uintptr_t free_elem = 0, next_free_elem = 0;

	if (vm_kernel_ready && !kmem_alloc_ready) {
		return NULL;
	}

	if (numrecords > BTLOG_MAX_RECORDS) {
		return NULL;
	}

	if (record_btdepth > BTLOG_MAX_DEPTH) {
		return NULL;
	}
	/* btlog_record_t is variable-sized, calculate needs now */
	btrecord_size = sizeof(btlog_record_t)
	    + sizeof(void *) * record_btdepth;

	buffersize_needed = sizeof(btlog_t) + numrecords * btrecord_size;
	buffersize_needed = round_page(buffersize_needed);

	if (zelems_count == 0) {
		zelems_count = ((max_mem + (1024 * 1024 * 1024) /*GB*/) >> 30) * ZELEMS_DEFAULT;

		if (PE_parse_boot_argn("zelems", &zelems_count, sizeof(zelems_count)) == TRUE) {
			/*
			 * Need a max? With this scheme, it should be possible to tune the default
			 * so that we don't need a boot-arg to request more elements.
			 */
			printf("Set number of log elements per btlog to: %ld\n", zelems_count);
		}
	}

	elemsize_needed = sizeof(btlog_element_t) * zelems_count;
	elemsize_needed = round_page(elemsize_needed);

	/* since rounding to a page size might hold more, recalculate */
	numrecords = MIN(BTLOG_MAX_RECORDS,
	    (buffersize_needed - sizeof(btlog_t)) / btrecord_size);
	if (kmem_alloc_ready) {
		ret = kmem_alloc(kernel_map, &buffer, buffersize_needed, VM_KERN_MEMORY_DIAG);
		if (ret != KERN_SUCCESS) {
			return NULL;
		}

		ret = kmem_alloc(kernel_map, &elem_buffer, elemsize_needed, VM_KERN_MEMORY_DIAG);
		if (ret != KERN_SUCCESS) {
			kmem_free(kernel_map, buffer, buffersize_needed);
			return NULL;
		}

		if (caller_will_remove_entries_for_element == TRUE) {
			ret = kmem_alloc(kernel_map, &elem_hash_buffer, ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t *), VM_KERN_MEMORY_DIAG);
		} else {
			ret = kmem_alloc(kernel_map, &elem_hash_buffer, 2 * sizeof(btlog_element_t *), VM_KERN_MEMORY_DIAG);
		}

		if (ret != KERN_SUCCESS) {
			kmem_free(kernel_map, buffer, buffersize_needed);
			kmem_free(kernel_map, elem_buffer, elemsize_needed);
			return NULL;
		}
	} else {
		buffer = (vm_address_t)pmap_steal_memory(buffersize_needed);
		elem_buffer = (vm_address_t)pmap_steal_memory(elemsize_needed);
		if (caller_will_remove_entries_for_element == TRUE) {
			elem_hash_buffer = (vm_address_t)pmap_steal_memory(ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t *));
		} else {
			elem_hash_buffer = (vm_address_t)pmap_steal_memory(2 * sizeof(btlog_element_t *));
		}
	}
	btlog = (btlog_t *)buffer;
	btlog->btlog_buffer = buffer;
	btlog->btlog_buffersize = buffersize_needed;
	btlog->freelist_elements = (btlog_element_t *)elem_buffer;

	simple_lock_init(&btlog->btlog_lock, 0);

	btlog->caller_will_remove_entries_for_element = caller_will_remove_entries_for_element;

	if (caller_will_remove_entries_for_element == TRUE) {
		btlog->elem_linkage_un.elem_recindex_hashtbl = (btlog_element_t **)elem_hash_buffer;
	} else {
		btlog->elem_linkage_un.element_hash_queue = (struct _element_hash_queue *)elem_hash_buffer;
		TAILQ_INIT(btlog->elem_linkage_un.element_hash_queue);
	}
	btlog->btrecords = (uintptr_t)(buffer + sizeof(btlog_t));
	btlog->btrecord_btdepth = record_btdepth;
	btlog->btrecord_size = btrecord_size;

	btlog->head = BTLOG_RECORDINDEX_NONE;
	btlog->tail = BTLOG_RECORDINDEX_NONE;
	btlog->active_record_count = 0;
	btlog->activerecord = BTLOG_RECORDINDEX_NONE;

	for (i = 0; i < ELEMENT_HASH_BUCKET_COUNT; i++) {
		btlog->elem_linkage_un.elem_recindex_hashtbl[i] = 0;
	}
	/* populate freelist_records with all records in order */
	btlog->freelist_records = 0;
	for (i = 0; i < (numrecords - 1); i++) {
		btlog_record_t *rec = lookup_btrecord(btlog, i);
		rec->next = (btlog_recordindex_t)(i + 1);
	}
	lookup_btrecord(btlog, i)->next = BTLOG_RECORDINDEX_NONE; /* terminate */

	/* populate freelist_elements with all elements in order */
	free_elem = (uintptr_t)btlog->freelist_elements;

	for (i = 0; i < (zelems_count - 1); i++) {
		next_free_elem = free_elem + sizeof(btlog_element_t);
		*(uintptr_t *)free_elem = next_free_elem;
		free_elem = next_free_elem;
	}
	*(uintptr_t *)next_free_elem = BTLOG_HASHELEMINDEX_NONE;

	return btlog;
}
/* Assumes btlog is already locked */
static btlog_recordindex_t
btlog_get_record_from_freelist(btlog_t *btlog)
{
	btlog_recordindex_t recindex = btlog->freelist_records;

	if (recindex == BTLOG_RECORDINDEX_NONE) {
		/* nothing on freelist */
		return BTLOG_RECORDINDEX_NONE;
	} else {
		/* remove the head of the freelist_records */
		btlog_record_t *record = lookup_btrecord(btlog, recindex);
		btlog->freelist_records = record->next;
		return recindex;
	}
}
static void
btlog_add_record_to_freelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
	btlog_recordindex_t precindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *precord = NULL, *record = NULL;

	record = lookup_btrecord(btlog, recindex);

	assert(TAILQ_EMPTY(&record->element_record_queue));

	precindex = btlog->head;
	precord = lookup_btrecord(btlog, precindex);

	if (precindex == recindex) {
		btlog->head = precord->next;
		btlog->active_record_count--;

		record->next = btlog->freelist_records;
		btlog->freelist_records = recindex;

		if (btlog->head == BTLOG_RECORDINDEX_NONE) {
			/* active list is now empty, update tail */
			btlog->tail = BTLOG_RECORDINDEX_NONE;
			assert(btlog->active_record_count == 0);
		}
	} else {
		while (precindex != BTLOG_RECORDINDEX_NONE) {
			if (precord->next == recindex) {
				precord->next = record->next;
				btlog->active_record_count--;

				record->next = btlog->freelist_records;
				btlog->freelist_records = recindex;

				if (btlog->tail == recindex) {
					btlog->tail = precindex;
				}
				break;
			} else {
				precindex = precord->next;
				precord = lookup_btrecord(btlog, precindex);
			}
		}
	}
}
/* Assumes btlog is already locked */
static void
btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict)
{
	btlog_recordindex_t recindex = btlog->head;
	btlog_record_t *record = NULL;
	btlog_element_t *recelem = NULL;

	if (recindex == BTLOG_RECORDINDEX_NONE) {
		/* nothing on active list */
		panic("BTLog: Eviction requested on btlog (0x%lx) with an empty active list.\n", (uintptr_t)btlog);
	} else {
		while (num_elements_to_evict) {
			/*
			 * LEAKS: reap the oldest element within the record with the lowest refs.
			 * CORRUPTION: reap the oldest element overall and drop its reference on the record
			 */

			if (btlog->caller_will_remove_entries_for_element) {
				uint32_t max_refs_threshold = UINT32_MAX;
				btlog_recordindex_t precindex = 0, prev_evictindex = 0, evict_index = 0;

				prev_evictindex = evict_index = btlog->head;
				precindex = recindex = btlog->head;

				while (recindex != BTLOG_RECORDINDEX_NONE) {
					record = lookup_btrecord(btlog, recindex);

					if (btlog->activerecord == recindex || record->ref_count > max_refs_threshold) {
						/* skip this record */
					} else {
						prev_evictindex = precindex;
						evict_index = recindex;
						max_refs_threshold = record->ref_count;
					}

					if (record->next != BTLOG_RECORDINDEX_NONE) {
						precindex = recindex;
					}

					recindex = record->next;
				}

				recindex = evict_index;
				assert(recindex != BTLOG_RECORDINDEX_NONE);
				record = lookup_btrecord(btlog, recindex);

				recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
			} else {
				recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
				recindex = recelem->recindex;
				record = lookup_btrecord(btlog, recindex);
			}

			/*
			 * Here we have the element to drop (recelem), its record and the record index.
			 */
			while (recelem && num_elements_to_evict) {
				TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

				if (btlog->caller_will_remove_entries_for_element) {
					btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;
					uint32_t hashidx = 0;

					hashidx = calculate_hashidx_for_element(~recelem->elem, btlog);

					prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
					while (hashelem != NULL) {
						if (hashelem == recelem) {
							break;
						} else {
							prev_hashelem = hashelem;
							hashelem = TAILQ_NEXT(hashelem, element_hash_link);
						}
					}

					if (hashelem == NULL) {
						panic("BTLog: Missing hashelem for element list of record 0x%lx\n", (uintptr_t)record);
					}

					if (prev_hashelem != hashelem) {
						TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
					} else {
						btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
					}
				} else {
					TAILQ_REMOVE(btlog->elem_linkage_un.element_hash_queue, recelem, element_hash_link);
				}

				btlog_add_elem_to_freelist(btlog, recelem);
				btlog->active_element_count--;

				num_elements_to_evict--;

				assert(record->ref_count);
				record->ref_count--;

				if (record->ref_count == 0) {
					btlog_add_record_to_freelist(btlog, recindex);

					/*
					 * LEAKS: All done with this record. Need the next least popular record.
					 * CORRUPTION: We don't care about records. We'll just pick the next oldest element.
					 */
					if (btlog->caller_will_remove_entries_for_element) {
						break;
					}
				}

				if (btlog->caller_will_remove_entries_for_element) {
					recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
				} else {
					recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
					recindex = recelem->recindex;
					record = lookup_btrecord(btlog, recindex);
				}
			}
		}
	}
}
/* Assumes btlog is already locked */
static void
btlog_append_record_to_activelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
	assert(recindex != BTLOG_RECORDINDEX_NONE);

	if (btlog->head == BTLOG_RECORDINDEX_NONE) {
		/* empty active list, update both head and tail */
		btlog->head = btlog->tail = recindex;
	} else {
		btlog_record_t *record = lookup_btrecord(btlog, btlog->tail);
		record->next = recindex;
		btlog->tail = recindex;
	}
	btlog->active_record_count++;
}
btlog_element_t *
btlog_get_elem_from_freelist(btlog_t *btlog)
{
	btlog_element_t *free_elem = NULL;

retry:
	free_elem = btlog->freelist_elements;

	if ((uintptr_t)free_elem == BTLOG_HASHELEMINDEX_NONE) {
		/* nothing on freelist */
		btlog_evict_elements_from_record(btlog, 1);
		goto retry;
	} else {
		/* remove the head of the freelist */
		uintptr_t next_elem = *(uintptr_t *)free_elem;
		btlog->freelist_elements = (btlog_element_t *)next_elem;
		return free_elem;
	}
}
void
btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *elem)
{
	btlog_element_t *free_elem = btlog->freelist_elements;

	TAILQ_NEXT(elem, element_hash_link) = (btlog_element_t *)BTLOG_HASHELEMINDEX_NONE;
	TAILQ_NEXT(elem, element_record_link) = (btlog_element_t *)BTLOG_HASHELEMINDEX_NONE;

	*(uintptr_t *)elem = (uintptr_t)free_elem;
	btlog->freelist_elements = elem;
}
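
/*
 * Note (illustrative, not part of the original source): the element
 * freelist is threaded through the free elements themselves; the first
 * word of a free element holds the address of the next free element, and
 * the chain is terminated with BTLOG_HASHELEMINDEX_NONE rather than NULL.
 * Popping the head is just:
 *
 *	btlog_element_t *e = btlog->freelist_elements;
 *	btlog->freelist_elements = (btlog_element_t *)(*(uintptr_t *)e);
 */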
void
btlog_add_entry(btlog_t *btlog,
    void *element,
    uint8_t operation,
    void *bt[],
    size_t btcount)
{
	btlog_recordindex_t recindex = 0;
	btlog_record_t *record = NULL;
	size_t i;
	u_int32_t md5_buffer[4];
	MD5_CTX btlog_ctx;
	uint32_t hashidx = 0;

	btlog_element_t *hashelem = NULL;

	if (g_crypto_funcs == NULL) {
		return;
	}

	btlog_lock(btlog);

	MD5Init(&btlog_ctx);
	for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
		MD5Update(&btlog_ctx, (u_char *)&bt[i], sizeof(bt[i]));
	}
	MD5Final((u_char *)&md5_buffer, &btlog_ctx);

	recindex = lookup_btrecord_byhash(btlog, md5_buffer[0], bt, btcount);

	if (recindex != BTLOG_RECORDINDEX_NONE) {
		record = lookup_btrecord(btlog, recindex);
		record->ref_count++;
		assert(record->operation == operation);
	} else {
retry:
		/* If there's a free record, use it */
		recindex = btlog_get_record_from_freelist(btlog);
		if (recindex == BTLOG_RECORDINDEX_NONE) {
			/* Use the first active record (FIFO age-out) */
			btlog_evict_elements_from_record(btlog, ((2 * sizeof(btlog_record_t)) / sizeof(btlog_element_t)));
			goto retry;
		}

		record = lookup_btrecord(btlog, recindex);

		/* we always add to the tail, so there is no next pointer */
		record->next = BTLOG_RECORDINDEX_NONE;
		record->operation = operation;
		record->bthash = md5_buffer[0];
		record->ref_count = 1;
		TAILQ_INIT(&record->element_record_queue);

		for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
			record->bt[i] = bt[i];
		}

		for (; i < btlog->btrecord_btdepth; i++) {
			record->bt[i] = NULL;
		}

		btlog_append_record_to_activelist(btlog, recindex);
	}

	btlog->activerecord = recindex;

	hashidx = calculate_hashidx_for_element((uintptr_t)element, btlog);
	hashelem = btlog_get_elem_from_freelist(btlog);

	assert(record->bthash);

	hashelem->elem = ~((uintptr_t)element);
	hashelem->operation = record->operation;
	hashelem->recindex = recindex;

	TAILQ_INSERT_HEAD(&record->element_record_queue, hashelem, element_record_link);

	if (btlog->caller_will_remove_entries_for_element) {
		TAILQ_NEXT(hashelem, element_hash_link) = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
		btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = hashelem;
	} else {
		TAILQ_INSERT_HEAD(btlog->elem_linkage_un.element_hash_queue, hashelem, element_hash_link);
	}

	btlog->active_element_count++;

	btlog->activerecord = BTLOG_RECORDINDEX_NONE;

	btlog_unlock(btlog);
}
void
btlog_remove_entries_for_element(btlog_t *btlog,
    void *element)
{
	btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *record = NULL;
	uint32_t hashidx = 0;

	btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;

	if (btlog->caller_will_remove_entries_for_element == FALSE) {
		panic("Explicit removal of entry is not permitted for this btlog (%p).\n", btlog);
	}

	if (g_crypto_funcs == NULL) {
		return;
	}

	btlog_lock(btlog);

	hashidx = calculate_hashidx_for_element((uintptr_t)element, btlog);
	prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];

	while (hashelem != NULL) {
		if (~hashelem->elem == (uintptr_t)element) {
			break;
		} else {
			prev_hashelem = hashelem;
			hashelem = TAILQ_NEXT(hashelem, element_hash_link);
		}
	}

	if (hashelem) {
		btlog_element_t *recelem = NULL;

		if (prev_hashelem != hashelem) {
			TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
		} else {
			btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
		}

		recindex = hashelem->recindex;
		record = lookup_btrecord(btlog, recindex);

		recelem = hashelem;
		TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

		btlog_add_elem_to_freelist(btlog, hashelem);
		btlog->active_element_count--;

		assert(record->ref_count);
		record->ref_count--;

		if (record->ref_count == 0) {
			btlog_add_record_to_freelist(btlog, recindex);
		}
	}

	btlog_unlock(btlog);
}
#if DEBUG || DEVELOPMENT
void
btlog_copy_backtraces_for_elements(btlog_t *btlog,
    uintptr_t *instances,
    uint32_t *countp,
    uint32_t zoneSize,
    leak_site_proc proc,
    void *refCon)
{
	btlog_recordindex_t recindex;
	btlog_record_t *record;
	btlog_element_t *hashelem;
	uint32_t hashidx, idx, dups, numSites, siteCount;
	uintptr_t element, site;
	uint32_t count;

	btlog_lock(btlog);

	count = *countp;
	for (numSites = 0, idx = 0; idx < count; idx++) {
		element = instances[idx];

		if (kInstanceFlagReferenced & element) {
			continue;
		}
		element = INSTANCE_PUT(element) & ~kInstanceFlags;

		site = 0;
		hashidx = calculate_hashidx_for_element(element, btlog);
		hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
		while (hashelem != NULL) {
			if (~hashelem->elem == element) {
				break;
			}
			hashelem = TAILQ_NEXT(hashelem, element_hash_link);
		}
		if (hashelem) {
			recindex = hashelem->recindex;
			site = (uintptr_t)lookup_btrecord(btlog, recindex);
		}

		if (site) {
			element = (site | kInstanceFlagReferenced);
		}
		instances[numSites] = INSTANCE_PUT(element);
		numSites++;
	}
	*countp = numSites;

	for (idx = 0; idx < numSites; idx++) {
		site = instances[idx];
		if (!site) {
			continue;
		}
		if (!(kInstanceFlagReferenced & site)) {
			continue;
		}
		for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
			if (instances[dups] == site) {
				siteCount++;
				instances[dups] = 0;
			}
		}
		record = (typeof(record))(INSTANCE_PUT(site) & ~kInstanceFlags);
		(*proc)(refCon, siteCount, zoneSize, (uintptr_t *)&record->bt[0], (uint32_t)btlog->btrecord_btdepth);
	}

	btlog_unlock(btlog);
}
#endif /* DEBUG || DEVELOPMENT */