/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/btlog.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/vm_param.h>

#include <libkern/crypto/md5.h>
#include <libkern/crypto/crypto_internal.h>

/*
 * Since all records are located contiguously in memory,
 * we use indices to them as the primary lookup mechanism,
 * and to maintain the linked list of active records
 * in chronological order.
 */
#define BTLOG_MAX_RECORDS (0xFFFFFF /* 16777215 */)
#define BTLOG_RECORDINDEX_NONE (0xFFFFFF)
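
/*
 * Worked example of the index scheme: record indices are stored in 24-bit
 * bitfields, so the all-ones pattern 0xFFFFFF doubles as the "no record"
 * sentinel and the largest usable index is 0xFFFFFE:
 *
 *     btlog_recordindex_t idx = BTLOG_RECORDINDEX_NONE;  // 0xFFFFFF, "none"
 *     // BTLOG_MAX_RECORDS == (1 << 24) - 1 == 16777215
 */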

/*
 * Each record is a stack with a reference count and a list of
 * log elements that refer to it.
 *
 * Each log element is placed in a hash bucket that is contained
 * within the btlog structure. It contains the index of the record
 * that it references.
 *
 * So you can go from an address to the corresponding stack by hashing the
 * address, finding the hash head and traversing the chain of log elements
 * until you find the element whose address matches yours (if it exists),
 * or creating a new element to hold this new address.
 */

#define ELEMENT_HASH_BUCKET_COUNT (256)
#define BTLOG_HASHELEMINDEX_NONE BTLOG_RECORDINDEX_NONE

#define ZELEMS_DEFAULT (8000)
size_t zelems_count = 0;

typedef uint32_t btlog_recordindex_t; /* only 24 bits used */

/*
 * Queue head for the queue of elements connected to a particular record (stack).
 * For quick removal of the oldest element referencing the least popular stack. Useful for LEAKS mode.
 */
TAILQ_HEAD(_element_record_queue, btlog_element);

/*
 * Queue head for the queue of elements that hash to the same bucket.
 * For quick removal of the oldest element ever logged. Useful for CORRUPTION mode,
 * where we use only one bucket, i.e. a FIFO.
 */
TAILQ_HEAD(_element_hash_queue, btlog_element);
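
/*
 * Summary of how the two queue types above serve the log's two modes:
 *
 *   LEAKS mode:      elements live in elem_recindex_hashtbl[256] chains
 *                    (singly-linked); eviction reaps the oldest element of
 *                    the least-referenced record.
 *   CORRUPTION mode: elements live on a single element_hash_queue
 *                    (doubly-linked); eviction reaps the oldest element
 *                    ever logged (FIFO).
 */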

typedef struct btlog_record {
    btlog_recordindex_t next:24,
        operation:2,
        ref_count:6;
    uint32_t bthash;
    struct _element_record_queue element_record_queue;
    void *bt[]; /* variable sized, based on btlog_t params */
} btlog_record_t;
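
/*
 * Sketch of the record layout (the numbers are illustrative only): because
 * bt[] is a flexible array member, a record's size is computed at
 * btlog_create() time as
 *
 *     btrecord_size = sizeof(btlog_record_t) + record_btdepth * sizeof(void *)
 *
 * so every record occupies the fixed header plus one slot per saved backtrace
 * frame, and records are addressed purely by index (see the lookup_btrecord()
 * macro below).
 */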

typedef struct btlog_element {
    btlog_recordindex_t recindex:24,
        operation:2;
    uintptr_t elem;
    TAILQ_ENTRY(btlog_element) element_record_link; /* Links to other elements pointing to the same stack. */

    TAILQ_ENTRY(btlog_element) element_hash_link;   /* Links to other elements in the same hash chain.
                                                     * During LEAKS mode, this is used as a singly-linked list because
                                                     * we don't want to initialize ELEMENT_HASH_BUCKET_COUNT heads.
                                                     *
                                                     * During CORRUPTION mode with a single hash chain, this is used as a doubly-linked list.
                                                     */
} btlog_element_t;

struct btlog {
    vm_address_t btlog_buffer;        /* all memory for this btlog_t */
    vm_size_t btlog_buffersize;

    uintptr_t btrecords;              /* use btlog_recordindex_t to lookup */
    size_t btrecord_btdepth;          /* BT entries per record */
    size_t btrecord_size;

    btlog_recordindex_t head;         /* active record list */
    btlog_recordindex_t tail;
    btlog_recordindex_t activerecord;
    btlog_recordindex_t freelist_records;

    size_t active_record_count;
    size_t active_element_count;
    btlog_element_t *freelist_elements;

    union {
        btlog_element_t **elem_recindex_hashtbl;        /* LEAKS mode: We use an array of ELEMENT_HASH_BUCKET_COUNT buckets. */
        struct _element_hash_queue *element_hash_queue; /* CORRUPTION mode: We use a single hash bucket, i.e. queue */
    } elem_linkage_un;

    decl_simple_lock_data(, btlog_lock);
    boolean_t caller_will_remove_entries_for_element; /* If TRUE, this means that the caller is interested in keeping track of abandoned / leaked elements.
                                                       * And so they want to be in charge of explicitly removing elements. Depending on this variable we
                                                       * will choose what kind of data structure to use for the elem_linkage_un union above.
                                                       */
};

extern boolean_t vm_kernel_ready;
extern boolean_t kmem_alloc_ready;

#define lookup_btrecord(btlog, index) \
    ((btlog_record_t *)(btlog->btrecords + index * btlog->btrecord_size))
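
/*
 * Example of the index arithmetic the macro performs: record i lives at
 *
 *     btlog->btrecords + i * btlog->btrecord_size
 *
 * e.g. with an (illustrative) btrecord_size of 160 bytes, record 3 starts
 * 480 bytes past the start of the record area.
 */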

uint32_t calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog);
uint32_t lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount);

void btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *hash_elem);
btlog_element_t* btlog_get_elem_from_freelist(btlog_t *btlog);

uint32_t
lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount)
{
    btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *record = NULL;
    size_t i = 0;
    boolean_t stack_matched = TRUE;

    assert(btcount);
    assert(bt);

    recindex = btlog->head;
    record = lookup_btrecord(btlog, recindex);
    while (recindex != BTLOG_RECORDINDEX_NONE) {
        assert(!TAILQ_EMPTY(&record->element_record_queue));
        if (record->bthash == md5_hash) {
            /*
             * Make sure that the incoming stack actually matches the
             * stack in this record. Since we only save off a
             * part of the md5 hash there can be collisions sometimes.
             * This comparison isn't costly because, in case of collisions,
             * usually the first few frames are different.
             */

            stack_matched = TRUE;

            if (btcount < btlog->btrecord_btdepth) {
                if (record->bt[btcount] != NULL) {
                    /*
                     * If the stack depth passed in is smaller than
                     * the recorded stack and we have a valid pointer
                     * in the recorded stack at that depth, then we
                     * don't need to do any further checks.
                     */
                    stack_matched = FALSE;
                    goto next;
                }
            }

            for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
                if (record->bt[i] != bt[i]) {
                    stack_matched = FALSE;
                    goto next;
                }
            }

            if (stack_matched == TRUE) {
                break;
            }
        }
next:
        recindex = record->next;
        record = lookup_btrecord(btlog, recindex);
    }

    return recindex;
}

uint32_t
calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog)
{
    if (btlog->caller_will_remove_entries_for_element) {
        uint32_t addr = 0;

        addr = (uint32_t) ((elem & 0xFF00) >> 0x8);

        return addr % ELEMENT_HASH_BUCKET_COUNT;
    } else {
        return 0;
    }
}
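
/*
 * Worked example: for an element address 0xffffff8012345678, the LEAKS-mode
 * hash keeps only the second-lowest byte:
 *
 *     (0xffffff8012345678 & 0xFF00) >> 8  ==  0x56  ==  bucket 86
 *
 * The lowest byte is skipped, presumably because zone elements are aligned
 * and its bits carry little entropy; CORRUPTION mode always returns bucket 0
 * since it keeps a single queue.
 */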

static void
btlog_lock(btlog_t *btlog)
{
    simple_lock(&btlog->btlog_lock, LCK_GRP_NULL);
}

static void
btlog_unlock(btlog_t *btlog)
{
    simple_unlock(&btlog->btlog_lock);
}

btlog_t *
btlog_create(size_t numrecords,
    size_t record_btdepth,
    boolean_t caller_will_remove_entries_for_element)
{
    btlog_t *btlog;
    vm_size_t buffersize_needed = 0, elemsize_needed = 0;
    vm_address_t buffer = 0, elem_buffer = 0, elem_hash_buffer = 0;
    size_t i = 0;
    kern_return_t ret;
    size_t btrecord_size = 0;
    uintptr_t free_elem = 0, next_free_elem = 0;

    if (vm_kernel_ready && !kmem_alloc_ready) {
        return NULL;
    }

    if (numrecords > BTLOG_MAX_RECORDS) {
        return NULL;
    }

    if (numrecords == 0) {
        return NULL;
    }

    if (record_btdepth > BTLOG_MAX_DEPTH) {
        return NULL;
    }

    /* btlog_record_t is variable-sized, calculate needs now */
    btrecord_size = sizeof(btlog_record_t)
        + sizeof(void *) * record_btdepth;

    buffersize_needed = sizeof(btlog_t) + numrecords * btrecord_size;
    buffersize_needed = round_page(buffersize_needed);

    if (zelems_count == 0) {
        zelems_count = ((max_mem + (1024 * 1024 * 1024) /*GB*/) >> 30) * ZELEMS_DEFAULT;

        if (PE_parse_boot_argn("zelems", &zelems_count, sizeof(zelems_count)) == TRUE) {
            /*
             * Need a max? With this scheme, it should be possible to tune the default
             * so that we don't need a boot-arg to request more elements.
             */
            printf("Set number of log elements per btlog to: %ld\n", zelems_count);
        }
    }
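
    /*
     * Worked example of the default sizing above: on a machine with
     * max_mem = 8 GB, (8 GB + 1 GB) >> 30 == 9, so zelems_count becomes
     * 9 * ZELEMS_DEFAULT == 72000 elements. The added 1 GB rounds up, so
     * even sub-GB configurations get at least one ZELEMS_DEFAULT worth.
     */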

    elemsize_needed = sizeof(btlog_element_t) * zelems_count;
    elemsize_needed = round_page(elemsize_needed);

    /* since rounding to a page size might hold more, recalculate */
    numrecords = MIN(BTLOG_MAX_RECORDS,
        (buffersize_needed - sizeof(btlog_t)) / btrecord_size);

    if (kmem_alloc_ready) {
        ret = kmem_alloc(kernel_map, &buffer, buffersize_needed, VM_KERN_MEMORY_DIAG);
        if (ret != KERN_SUCCESS) {
            return NULL;
        }

        ret = kmem_alloc(kernel_map, &elem_buffer, elemsize_needed, VM_KERN_MEMORY_DIAG);
        if (ret != KERN_SUCCESS) {
            kmem_free(kernel_map, buffer, buffersize_needed);
            return NULL;
        }

        if (caller_will_remove_entries_for_element == TRUE) {
            ret = kmem_alloc(kernel_map, &elem_hash_buffer, ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
        } else {
            ret = kmem_alloc(kernel_map, &elem_hash_buffer, 2 * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
        }

        if (ret != KERN_SUCCESS) {
            kmem_free(kernel_map, buffer, buffersize_needed);
            kmem_free(kernel_map, elem_buffer, elemsize_needed);
            return NULL;
        }
    } else {
        buffer = (vm_address_t)pmap_steal_memory(buffersize_needed);
        elem_buffer = (vm_address_t)pmap_steal_memory(elemsize_needed);
        if (caller_will_remove_entries_for_element == TRUE) {
            elem_hash_buffer = (vm_address_t)pmap_steal_memory(ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*));
        } else {
            elem_hash_buffer = (vm_address_t)pmap_steal_memory(2 * sizeof(btlog_element_t*));
        }
        ret = KERN_SUCCESS;
    }

    btlog = (btlog_t *)buffer;
    btlog->btlog_buffer = buffer;
    btlog->btlog_buffersize = buffersize_needed;
    btlog->freelist_elements = (btlog_element_t *)elem_buffer;

    simple_lock_init(&btlog->btlog_lock, 0);

    btlog->caller_will_remove_entries_for_element = caller_will_remove_entries_for_element;

    if (caller_will_remove_entries_for_element == TRUE) {
        btlog->elem_linkage_un.elem_recindex_hashtbl = (btlog_element_t **)elem_hash_buffer;
    } else {
        btlog->elem_linkage_un.element_hash_queue = (struct _element_hash_queue *) elem_hash_buffer;
        TAILQ_INIT(btlog->elem_linkage_un.element_hash_queue);
    }

    btlog->btrecords = (uintptr_t)(buffer + sizeof(btlog_t));
    btlog->btrecord_btdepth = record_btdepth;
    btlog->btrecord_size = btrecord_size;

    btlog->head = BTLOG_RECORDINDEX_NONE;
    btlog->tail = BTLOG_RECORDINDEX_NONE;
    btlog->active_record_count = 0;
    btlog->activerecord = BTLOG_RECORDINDEX_NONE;

    for (i = 0; i < ELEMENT_HASH_BUCKET_COUNT; i++) {
        btlog->elem_linkage_un.elem_recindex_hashtbl[i] = 0;
    }

    /* populate freelist_records with all records in order */
    btlog->freelist_records = 0;
    for (i = 0; i < (numrecords - 1); i++) {
        btlog_record_t *rec = lookup_btrecord(btlog, i);
        rec->next = (btlog_recordindex_t)(i + 1);
    }
    lookup_btrecord(btlog, i)->next = BTLOG_RECORDINDEX_NONE; /* terminate */

    /* populate freelist_elements with all elements in order */
    free_elem = (uintptr_t)btlog->freelist_elements;

    for (i = 0; i < (zelems_count - 1); i++) {
        next_free_elem = free_elem + sizeof(btlog_element_t);
        *(uintptr_t*)free_elem = next_free_elem;
        free_elem = next_free_elem;
    }
    *(uintptr_t*)next_free_elem = BTLOG_HASHELEMINDEX_NONE;

    return btlog;
}

/* Assumes btlog is already locked */
static btlog_recordindex_t
btlog_get_record_from_freelist(btlog_t *btlog)
{
    btlog_recordindex_t recindex = btlog->freelist_records;

    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* nothing on freelist */
        return BTLOG_RECORDINDEX_NONE;
    } else {
        /* remove the head of the freelist_records */
        btlog_record_t *record = lookup_btrecord(btlog, recindex);
        btlog->freelist_records = record->next;
        return recindex;
    }
}

static void
btlog_add_record_to_freelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
    btlog_recordindex_t precindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *precord = NULL, *record = NULL;

    record = lookup_btrecord(btlog, recindex);

    assert(TAILQ_EMPTY(&record->element_record_queue));

    record->bthash = 0;

    precindex = btlog->head;
    precord = lookup_btrecord(btlog, precindex);

    if (precindex == recindex) {
        btlog->head = precord->next;
        btlog->active_record_count--;

        record->next = btlog->freelist_records;
        btlog->freelist_records = recindex;

        if (btlog->head == BTLOG_RECORDINDEX_NONE) {
            /* active list is now empty, update tail */
            btlog->tail = BTLOG_RECORDINDEX_NONE;
            assert(btlog->active_record_count == 0);
        }
    } else {
        while (precindex != BTLOG_RECORDINDEX_NONE) {
            if (precord->next == recindex) {
                precord->next = record->next;
                btlog->active_record_count--;

                record->next = btlog->freelist_records;
                btlog->freelist_records = recindex;

                if (btlog->tail == recindex) {
                    btlog->tail = precindex;
                }
                break;
            } else {
                precindex = precord->next;
                precord = lookup_btrecord(btlog, precindex);
            }
        }
    }
}

/* Assumes btlog is already locked */
static void
btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict)
{
    btlog_recordindex_t recindex = btlog->head;
    btlog_record_t *record = NULL;
    btlog_element_t *recelem = NULL;

    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* nothing on active list */
        panic("BTLog: Eviction requested on btlog (0x%lx) with an empty active list.\n", (uintptr_t) btlog);
    }

    while (num_elements_to_evict) {
        /*
         * LEAKS: reap the oldest element within the record with the lowest refs.
         * CORRUPTION: reap the oldest element overall and drop its reference on the record
         */

        if (btlog->caller_will_remove_entries_for_element) {
            uint32_t max_refs_threshold = UINT32_MAX;
            btlog_recordindex_t precindex = 0, prev_evictindex = 0, evict_index = 0;

            prev_evictindex = evict_index = btlog->head;
            precindex = recindex = btlog->head;

            while (recindex != BTLOG_RECORDINDEX_NONE) {
                record = lookup_btrecord(btlog, recindex);

                if (btlog->activerecord == recindex || record->ref_count > max_refs_threshold) {
                    /* skip this record */
                } else {
                    prev_evictindex = precindex;
                    evict_index = recindex;
                    max_refs_threshold = record->ref_count;
                }

                if (record->next != BTLOG_RECORDINDEX_NONE) {
                    precindex = recindex;
                }

                recindex = record->next;
            }

            recindex = evict_index;
            assert(recindex != BTLOG_RECORDINDEX_NONE);
            record = lookup_btrecord(btlog, recindex);

            recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
        } else {
            recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
            recindex = recelem->recindex;
            record = lookup_btrecord(btlog, recindex);
        }

        /*
         * Here we have the element to drop (recelem), its record and the record index.
         */

        while (recelem && num_elements_to_evict) {
            TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

            if (btlog->caller_will_remove_entries_for_element) {
                btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;
                uint32_t hashidx = 0;

                hashidx = calculate_hashidx_for_element(~recelem->elem, btlog);

                prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
                while (hashelem != NULL) {
                    if (hashelem == recelem) {
                        break;
                    } else {
                        prev_hashelem = hashelem;
                        hashelem = TAILQ_NEXT(hashelem, element_hash_link);
                    }
                }

                if (hashelem == NULL) {
                    panic("BTLog: Missing hashelem for element list of record 0x%lx\n", (uintptr_t) record);
                }

                if (prev_hashelem != hashelem) {
                    TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
                } else {
                    btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
                }
            } else {
                TAILQ_REMOVE(btlog->elem_linkage_un.element_hash_queue, recelem, element_hash_link);
            }

            btlog_add_elem_to_freelist(btlog, recelem);
            btlog->active_element_count--;

            num_elements_to_evict--;

            assert(record->ref_count);

            record->ref_count--;

            if (record->ref_count == 0) {
                btlog_add_record_to_freelist(btlog, recindex);

                /*
                 * LEAKS: All done with this record. Need the next least popular record.
                 * CORRUPTION: We don't care about records. We'll just pick the next oldest element.
                 */

                if (btlog->caller_will_remove_entries_for_element) {
                    break;
                }
            }

            if (btlog->caller_will_remove_entries_for_element) {
                recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
            } else {
                recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
                recindex = recelem->recindex;
                record = lookup_btrecord(btlog, recindex);
            }
        }
    }
}

/* Assumes btlog is already locked */
static void
btlog_append_record_to_activelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
    assert(recindex != BTLOG_RECORDINDEX_NONE);

    if (btlog->head == BTLOG_RECORDINDEX_NONE) {
        /* empty active list, update both head and tail */
        btlog->head = btlog->tail = recindex;
    } else {
        btlog_record_t *record = lookup_btrecord(btlog, btlog->tail);
        record->next = recindex;
        btlog->tail = recindex;
    }
    btlog->active_record_count++;
}

btlog_element_t*
btlog_get_elem_from_freelist(btlog_t *btlog)
{
    btlog_element_t *free_elem = NULL;

retry:
    free_elem = btlog->freelist_elements;

    if ((uintptr_t)free_elem == BTLOG_HASHELEMINDEX_NONE) {
        /* nothing on freelist */
        btlog_evict_elements_from_record(btlog, 1);
        goto retry;
    } else {
        /* remove the head of the freelist */
        uintptr_t next_elem = *(uintptr_t*)free_elem;
        btlog->freelist_elements = (btlog_element_t *)next_elem;
        return free_elem;
    }
}

void
btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *elem)
{
    btlog_element_t *free_elem = btlog->freelist_elements;

    TAILQ_NEXT(elem, element_hash_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;
    TAILQ_NEXT(elem, element_record_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;

    *(uintptr_t*)elem = (uintptr_t)free_elem;
    btlog->freelist_elements = elem;
}

void
btlog_add_entry(btlog_t *btlog,
    void *element,
    uint8_t operation,
    void *bt[],
    size_t btcount)
{
    btlog_recordindex_t recindex = 0;
    btlog_record_t *record = NULL;
    size_t i;
    u_int32_t md5_buffer[4];
    MD5_CTX btlog_ctx;
    uint32_t hashidx = 0;

    btlog_element_t *hashelem = NULL;

    if (g_crypto_funcs == NULL) {
        return;
    }

    btlog_lock(btlog);

    MD5Init(&btlog_ctx);
    for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
        MD5Update(&btlog_ctx, (u_char *) &bt[i], sizeof(bt[i]));
    }
    MD5Final((u_char *) &md5_buffer, &btlog_ctx);

    recindex = lookup_btrecord_byhash(btlog, md5_buffer[0], bt, btcount);
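
    /*
     * Note on the hashing above: the backtrace is hashed by running MD5 over
     * the raw frame pointers, but only the first 32-bit word of the 128-bit
     * digest (md5_buffer[0]) is kept as record->bthash. Collisions are
     * therefore possible, which is why lookup_btrecord_byhash() also compares
     * the stacks frame by frame.
     */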

    if (recindex != BTLOG_RECORDINDEX_NONE) {
        record = lookup_btrecord(btlog, recindex);
        record->ref_count++;
        assert(record->operation == operation);
    } else {
retry:
        /* If there's a free record, use it */
        recindex = btlog_get_record_from_freelist(btlog);
        if (recindex == BTLOG_RECORDINDEX_NONE) {
            /* Use the first active record (FIFO age-out) */
            btlog_evict_elements_from_record(btlog, ((2 * sizeof(btlog_record_t)) / sizeof(btlog_element_t)));
            goto retry;
        }

        record = lookup_btrecord(btlog, recindex);

        /* we always add to the tail, so there is no next pointer */
        record->next = BTLOG_RECORDINDEX_NONE;
        record->operation = operation;
        record->bthash = md5_buffer[0];
        record->ref_count = 1;
        TAILQ_INIT(&record->element_record_queue);

        for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
            record->bt[i] = bt[i];
        }

        for (; i < btlog->btrecord_btdepth; i++) {
            record->bt[i] = NULL;
        }

        btlog_append_record_to_activelist(btlog, recindex);
    }

    btlog->activerecord = recindex;

    hashidx = calculate_hashidx_for_element((uintptr_t)element, btlog);
    hashelem = btlog_get_elem_from_freelist(btlog);

    hashelem->elem = ~((uintptr_t)element);
    hashelem->operation = record->operation;
    hashelem->recindex = recindex;

    TAILQ_INSERT_HEAD(&record->element_record_queue, hashelem, element_record_link);

    if (btlog->caller_will_remove_entries_for_element) {
        TAILQ_NEXT(hashelem, element_hash_link) = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
        btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = hashelem;
    } else {
        TAILQ_INSERT_HEAD(btlog->elem_linkage_un.element_hash_queue, hashelem, element_hash_link);
    }

    btlog->active_element_count++;

    btlog->activerecord = BTLOG_RECORDINDEX_NONE;

    btlog_unlock(btlog);
}

void
btlog_remove_entries_for_element(btlog_t *btlog,
    void *element)
{
    btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *record = NULL;
    uint32_t hashidx = 0;

    btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;

    if (btlog->caller_will_remove_entries_for_element == FALSE) {
        panic("Explicit removal of entry is not permitted for this btlog (%p).\n", btlog);
    }

    if (g_crypto_funcs == NULL) {
        return;
    }

    btlog_lock(btlog);

    hashidx = calculate_hashidx_for_element((uintptr_t) element, btlog);
    prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];

    while (hashelem != NULL) {
        if (~hashelem->elem == (uintptr_t)element) {
            break;
        } else {
            prev_hashelem = hashelem;
            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
        }
    }

    if (hashelem) {
        btlog_element_t *recelem = NULL;

        if (prev_hashelem != hashelem) {
            TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
        } else {
            btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
        }

        recindex = hashelem->recindex;
        record = lookup_btrecord(btlog, recindex);

        recelem = hashelem;
        TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

        btlog_add_elem_to_freelist(btlog, hashelem);
        btlog->active_element_count--;

        assert(record->ref_count);
        record->ref_count--;

        if (record->ref_count == 0) {
            btlog_add_record_to_freelist(btlog, recindex);
        }
    }

    btlog_unlock(btlog);
}
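
/*
 * Note: removal is only supported in LEAKS mode (the panic above enforces
 * this), because only the per-bucket hash chains make finding one element
 * cheap. Pairing every btlog_add_entry() with a matching removal is what
 * leaves exactly the leaked/abandoned elements behind in the log.
 */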

#if DEBUG || DEVELOPMENT

void
btlog_copy_backtraces_for_elements(btlog_t *btlog,
    uintptr_t *instances,
    uint32_t *countp,
    uint32_t zoneSize,
    leak_site_proc proc,
    void *refCon)
{
    btlog_recordindex_t recindex;
    btlog_record_t *record;
    btlog_element_t *hashelem;
    uint32_t hashidx, idx, dups, numSites, siteCount;
    uintptr_t element, site;
    uint32_t count;

    btlog_lock(btlog);

    count = *countp;
    for (numSites = 0, idx = 0; idx < count; idx++) {
        element = instances[idx];

        if (kInstanceFlagReferenced & element) {
            continue;
        }
        element = INSTANCE_PUT(element) & ~kInstanceFlags;

        site = 0;
        hashidx = calculate_hashidx_for_element(element, btlog);
        hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
        while (hashelem != NULL) {
            if (~hashelem->elem == element) {
                break;
            }
            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
        }
        if (hashelem != NULL) {
            recindex = hashelem->recindex;
            site = (uintptr_t) lookup_btrecord(btlog, recindex);
        }

        if (site) {
            element = (site | kInstanceFlagReferenced);
        }
        instances[numSites] = INSTANCE_PUT(element);
        numSites++;
    }

    for (idx = 0; idx < numSites; idx++) {
        site = instances[idx];
        if (!site) {
            continue;
        }
        if (!(kInstanceFlagReferenced & site)) {
            continue;
        }
        for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
            if (instances[dups] == site) {
                siteCount++;
                instances[dups] = 0;
            }
        }
        record = (typeof(record))(INSTANCE_PUT(site) & ~kInstanceFlags);
        (*proc)(refCon, siteCount, zoneSize, (uintptr_t *) &record->bt[0], (uint32_t) btlog->btrecord_btdepth);
    }

    btlog_unlock(btlog);
}

/*
 * Returns the number of records in the btlog struct.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
size_t
get_btlog_records_count(btlog_t *btlog)
{
    if (btlog->btlog_buffersize < sizeof(btlog_t)) {
        return 0;
    }
    return (btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size;
}
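
/*
 * Worked example: for a btlog whose buffer spans three 16 KB pages (49152
 * bytes) and an illustrative btrecord_size of 160 bytes, this returns
 * (49152 - sizeof(btlog_t)) / 160 records; the division floors, matching how
 * btlog_create() packs records after the header.
 */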

/*
 * Copies out relevant info from btlog_record_t's to zone_btrecord_t's. 'numrecs' points to the number of records
 * the 'records' buffer can hold. Upon return 'numrecs' points to the number of records actually copied out.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
void
get_btlog_records(btlog_t *btlog, zone_btrecord_t *records, unsigned int *numrecs)
{
    unsigned int count, recs_copied, frame;
    zone_btrecord_t *current_rec;
    btlog_record_t *zstack_record;
    btlog_recordindex_t zstack_index = BTLOG_RECORDINDEX_NONE;

    btlog_lock(btlog);

    count = 0;
    if (btlog->btlog_buffersize > sizeof(btlog_t)) {
        count = (unsigned int)((btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size);
    }
    /* Copy out only as many records as the pre-allocated buffer size permits. */
    if (count > *numrecs) {
        count = *numrecs;
    }
    zstack_index = btlog->head;

    current_rec = &records[0];
    recs_copied = 0;

    while (recs_copied < count && (zstack_index != BTLOG_RECORDINDEX_NONE)) {
        zstack_record = lookup_btrecord(btlog, zstack_index);
        current_rec->operation_type = (uint32_t)(zstack_record->operation);
        current_rec->ref_count = zstack_record->ref_count;

        frame = 0;
        while (frame < MIN(btlog->btrecord_btdepth, MAX_ZTRACE_DEPTH)) {
            current_rec->bt[frame] = (uint64_t)VM_KERNEL_UNSLIDE(zstack_record->bt[frame]);
            frame++;
        }

        zstack_index = zstack_record->next;
        recs_copied++;
        current_rec++;
    }
    *numrecs = recs_copied;

    btlog_unlock(btlog);
}

#endif /* DEBUG || DEVELOPMENT */