/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/btlog.h>
#include <kern/assert.h>
#include <kern/startup.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/vm_param.h>
#include <libkern/crypto/md5.h>
#include <libkern/crypto/crypto_internal.h>
/*
 * Since all records are located contiguously in memory,
 * we use indices to them as the primary lookup mechanism,
 * and to maintain the linked list of active records
 * in chronological order.
 */
#define BTLOG_MAX_RECORDS       (0xFFFFFF /* 16777215 */ )
#define BTLOG_RECORDINDEX_NONE  (0xFFFFFF)
/*
 * Each record is a stack with a reference count and a list of
 * log elements that refer to it.
 *
 * Each log element is placed in a hash bucket that is contained
 * within the btlog structure. It contains the index to the record
 * that it references.
 *
 * So you can go from an address to the corresponding stack by hashing the address,
 * finding the hash head and traversing the chain of log elements
 * till you find the hash bucket with an address that matches your
 * address (if it exists) or creating a new bucket to hold this new address.
 */
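
/*
 * Illustrative sketch of that lookup path (LEAKS mode; not part of the
 * original flow). Elements store the one's complement of the address, as
 * btlog_add_entry() below does, so the chain walk compares ~e->elem:
 *
 *	uint32_t hashidx = calculate_hashidx_for_element((uintptr_t)addr, btlog);
 *	btlog_element_t *e = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
 *	while (e != NULL && ~e->elem != (uintptr_t)addr) {
 *		e = TAILQ_NEXT(e, element_hash_link);
 *	}
 *	// e now names the element for addr (or NULL); e->recindex then
 *	// indexes the record holding the saved backtrace.
 */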

#define ELEMENT_HASH_BUCKET_COUNT (256)
#define BTLOG_HASHELEMINDEX_NONE  BTLOG_RECORDINDEX_NONE

#define ZELEMS_DEFAULT (8000)
size_t zelems_count = 0;

typedef uint32_t btlog_recordindex_t; /* only 24 bits used */

/*
 * Queue head for the queue of elements connected to a particular record (stack).
 * For quick removal of the oldest element referencing the least popular stack. Useful for LEAKS mode.
 */
TAILQ_HEAD(_element_record_queue, btlog_element);

/*
 * Queue head for the queue of elements that hash to the same bucket.
 * For quick removal of the oldest element ever logged. Useful for CORRUPTION mode where we use only one bucket i.e. FIFO.
 */
TAILQ_HEAD(_element_hash_queue, btlog_element);

typedef struct btlog_record {
	btlog_recordindex_t next:24,
	    operation:8;
	uint32_t ref_count;
	uint32_t bthash;
	struct _element_record_queue element_record_queue;
	void *bt[]; /* variable sized, based on btlog_t params */
} btlog_record_t;

typedef struct btlog_element {
	btlog_recordindex_t recindex:24,
	    operation:8;
	uintptr_t elem;
	TAILQ_ENTRY(btlog_element) element_record_link; /* Links to other elements pointing to the same stack. */

	TAILQ_ENTRY(btlog_element) element_hash_link;   /* Links to other elements in the same hash chain.
	                                                 * During LEAKS mode, this is used as a singly-linked list because
	                                                 * we don't want to initialize ELEMENT_HASH_BUCKET_COUNT heads.
	                                                 *
	                                                 * During CORRUPTION mode with a single hash chain, this is used as a doubly-linked list.
	                                                 */
} btlog_element_t;

struct btlog {
	vm_address_t btlog_buffer;        /* all memory for this btlog_t */
	vm_size_t btlog_buffersize;

	uintptr_t btrecords;              /* use btlog_recordindex_t to lookup */
	size_t btrecord_btdepth;          /* BT entries per record */
	size_t btrecord_size;

	btlog_recordindex_t head;         /* active record list */
	btlog_recordindex_t tail;
	btlog_recordindex_t activerecord;
	btlog_recordindex_t freelist_records;

	size_t active_record_count;
	size_t active_element_count;
	btlog_element_t *freelist_elements;
	union {
		btlog_element_t **elem_recindex_hashtbl;        /* LEAKS mode: We use an array of ELEMENT_HASH_BUCKET_COUNT buckets. */
		struct _element_hash_queue *element_hash_queue; /* CORRUPTION mode: We use a single hash bucket i.e. queue */
	} elem_linkage_un;

	decl_simple_lock_data(, btlog_lock);
	boolean_t caller_will_remove_entries_for_element; /* If TRUE, this means that the caller is interested in keeping track of abandoned / leaked elements.
	                                                   * And so they want to be in charge of explicitly removing elements. Depending on this variable we
	                                                   * will choose what kind of data structure to use for the elem_linkage_un union above.
	                                                   */
};

#define lookup_btrecord(btlog, index) \
	((btlog_record_t *)(btlog->btrecords + index * btlog->btrecord_size))
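
/*
 * Example of the lookup arithmetic above (sizes are illustrative, assuming
 * LP64): with btrecord_btdepth = 15, each record occupies
 * sizeof(btlog_record_t) + 15 * sizeof(void *) bytes, and record i starts at
 *
 *	(btlog_record_t *)(btlog->btrecords + i * btlog->btrecord_size)
 *
 * Only the 24-bit index i is stored in links such as record->next, which is
 * what keeps BTLOG_MAX_RECORDS at 0xFFFFFF.
 */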

uint32_t calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog);
uint32_t lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount);

void btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *hash_elem);
btlog_element_t* btlog_get_elem_from_freelist(btlog_t *btlog);

uint32_t
lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount)
{
	btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *record = NULL;
	size_t i = 0;
	boolean_t stack_matched = TRUE;

	recindex = btlog->head;
	record = lookup_btrecord(btlog, recindex);
	while (recindex != BTLOG_RECORDINDEX_NONE) {
		assert(!TAILQ_EMPTY(&record->element_record_queue));
		if (record->bthash == md5_hash) {
			/*
			 * Make sure that the incoming stack actually matches the
			 * stack in this record. Since we only save off a
			 * part of the md5 hash there can be collisions sometimes.
			 * This comparison isn't costly because, in case of collisions,
			 * usually the first few frames are different.
			 */
			stack_matched = TRUE;

			if (btcount < btlog->btrecord_btdepth) {
				if (record->bt[btcount] != NULL) {
					/*
					 * If the stack depth passed in is smaller than
					 * the recorded stack and we have a valid pointer
					 * in the recorded stack at that depth, then we
					 * don't need to do any further checks.
					 */
					stack_matched = FALSE;
					goto next;
				}
			}

			for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
				if (record->bt[i] != bt[i]) {
					stack_matched = FALSE;
					goto next;
				}
			}

			if (stack_matched == TRUE) {
				break;
			}
		}
next:
		recindex = record->next;
		record = lookup_btrecord(btlog, recindex);
	}

	return recindex;
}

uint32_t
calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog)
{
	if (btlog->caller_will_remove_entries_for_element) {
		uint32_t addr = 0;

		addr = (uint32_t) ((elem & 0xFF00) >> 0x8);

		return addr;
	} else {
		return 0;
	}
}
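
/*
 * Worked example for the hash above (address is illustrative): an element at
 * 0xffffff801234abcd has bits 8..15 equal to 0xab, so in LEAKS mode it lands
 * in bucket 0xab (171) of the ELEMENT_HASH_BUCKET_COUNT (256) buckets. In
 * CORRUPTION mode every element shares bucket 0.
 */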

static void
btlog_lock(btlog_t *btlog)
{
	simple_lock(&btlog->btlog_lock, LCK_GRP_NULL);
}

static void
btlog_unlock(btlog_t *btlog)
{
	simple_unlock(&btlog->btlog_lock);
}

btlog_t *
btlog_create(size_t numrecords,
    size_t record_btdepth,
    boolean_t caller_will_remove_entries_for_element)
{
	btlog_t *btlog;
	vm_size_t buffersize_needed = 0, elemsize_needed = 0;
	vm_address_t buffer = 0, elem_buffer = 0, elem_hash_buffer = 0;
	size_t i = 0;
	kern_return_t ret;
	size_t btrecord_size = 0;
	uintptr_t free_elem = 0, next_free_elem = 0;

	if (startup_phase >= STARTUP_SUB_VM_KERNEL &&
	    startup_phase < STARTUP_SUB_KMEM_ALLOC) {
		return NULL;
	}

	if (numrecords > BTLOG_MAX_RECORDS) {
		return NULL;
	}

	if (numrecords == 0) {
		return NULL;
	}

	if (record_btdepth > BTLOG_MAX_DEPTH) {
		return NULL;
	}

	/* btlog_record_t is variable-sized, calculate needs now */
	btrecord_size = sizeof(btlog_record_t)
	    + sizeof(void *) * record_btdepth;

	buffersize_needed = sizeof(btlog_t) + numrecords * btrecord_size;
	buffersize_needed = round_page(buffersize_needed);

	if (zelems_count == 0) {
		zelems_count = ((max_mem + (1024 * 1024 * 1024) /*GB*/) >> 30) * ZELEMS_DEFAULT;

		if (PE_parse_boot_argn("zelems", &zelems_count, sizeof(zelems_count)) == TRUE) {
			/*
			 * Need a max? With this scheme, it should be possible to tune the default
			 * so that we don't need a boot-arg to request more elements.
			 */
			printf("Set number of log elements per btlog to: %ld\n", zelems_count);
		}
	}
	elemsize_needed = sizeof(btlog_element_t) * zelems_count;
	elemsize_needed = round_page(elemsize_needed);

	/* since rounding to a page size might hold more, recalculate */
	numrecords = MIN(BTLOG_MAX_RECORDS,
	    (buffersize_needed - sizeof(btlog_t)) / btrecord_size);

	if (__probable(startup_phase >= STARTUP_SUB_KMEM_ALLOC)) {
		ret = kmem_alloc(kernel_map, &buffer, buffersize_needed, VM_KERN_MEMORY_DIAG);
		if (ret != KERN_SUCCESS) {
			return NULL;
		}

		ret = kmem_alloc(kernel_map, &elem_buffer, elemsize_needed, VM_KERN_MEMORY_DIAG);
		if (ret != KERN_SUCCESS) {
			kmem_free(kernel_map, buffer, buffersize_needed);
			buffer = 0;
			return NULL;
		}

		if (caller_will_remove_entries_for_element == TRUE) {
			ret = kmem_alloc(kernel_map, &elem_hash_buffer, ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
		} else {
			ret = kmem_alloc(kernel_map, &elem_hash_buffer, 2 * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
		}

		if (ret != KERN_SUCCESS) {
			kmem_free(kernel_map, buffer, buffersize_needed);
			buffer = 0;

			kmem_free(kernel_map, elem_buffer, elemsize_needed);
			elem_buffer = 0;
			return NULL;
		}
	} else {
		buffer = (vm_address_t)pmap_steal_memory(buffersize_needed);
		elem_buffer = (vm_address_t)pmap_steal_memory(elemsize_needed);
		if (caller_will_remove_entries_for_element == TRUE) {
			elem_hash_buffer = (vm_address_t)pmap_steal_memory(ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*));
		} else {
			elem_hash_buffer = (vm_address_t)pmap_steal_memory(2 * sizeof(btlog_element_t*));
		}
	}

	btlog = (btlog_t *)buffer;
	btlog->btlog_buffer = buffer;
	btlog->btlog_buffersize = buffersize_needed;
	btlog->freelist_elements = (btlog_element_t *)elem_buffer;

	simple_lock_init(&btlog->btlog_lock, 0);

	btlog->caller_will_remove_entries_for_element = caller_will_remove_entries_for_element;

	if (caller_will_remove_entries_for_element == TRUE) {
		btlog->elem_linkage_un.elem_recindex_hashtbl = (btlog_element_t **)elem_hash_buffer;
	} else {
		btlog->elem_linkage_un.element_hash_queue = (struct _element_hash_queue *) elem_hash_buffer;
		TAILQ_INIT(btlog->elem_linkage_un.element_hash_queue);
	}

	btlog->btrecords = (uintptr_t)(buffer + sizeof(btlog_t));
	btlog->btrecord_btdepth = record_btdepth;
	btlog->btrecord_size = btrecord_size;

	btlog->head = BTLOG_RECORDINDEX_NONE;
	btlog->tail = BTLOG_RECORDINDEX_NONE;
	btlog->active_record_count = 0;
	btlog->activerecord = BTLOG_RECORDINDEX_NONE;

	for (i = 0; i < ELEMENT_HASH_BUCKET_COUNT; i++) {
		btlog->elem_linkage_un.elem_recindex_hashtbl[i] = 0;
	}

	/* populate freelist_records with all records in order */
	btlog->freelist_records = 0;
	for (i = 0; i < (numrecords - 1); i++) {
		btlog_record_t *rec = lookup_btrecord(btlog, i);
		rec->next = (btlog_recordindex_t)(i + 1);
	}
	lookup_btrecord(btlog, i)->next = BTLOG_RECORDINDEX_NONE; /* terminate */

	/* populate freelist_elements with all elements in order */
	free_elem = (uintptr_t)btlog->freelist_elements;

	for (i = 0; i < (zelems_count - 1); i++) {
		next_free_elem = free_elem + sizeof(btlog_element_t);
		*(uintptr_t*)free_elem = next_free_elem;
		free_elem = next_free_elem;
	}
	*(uintptr_t*)next_free_elem = BTLOG_HASHELEMINDEX_NONE;

	return btlog;
}
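
/*
 * Usage sketch (hypothetical caller, not part of this file): a zone that
 * wants allocation logging might set up a btlog roughly like this; the
 * record count and depth below are made-up values.
 *
 *	btlog_t *zlog = btlog_create(1024, 15, FALSE);	// CORRUPTION-style FIFO log
 *	if (zlog != NULL) {
 *		btlog_add_entry(zlog, element, operation, bt, btcount);
 *	}
 *
 * Passing TRUE for caller_will_remove_entries_for_element selects the
 * LEAKS-mode hash table instead and obligates the caller to call
 * btlog_remove_entries_for_element() whenever an element is freed.
 */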

/* Assumes btlog is already locked */
static btlog_recordindex_t
btlog_get_record_from_freelist(btlog_t *btlog)
{
	btlog_recordindex_t recindex = btlog->freelist_records;

	if (recindex == BTLOG_RECORDINDEX_NONE) {
		/* nothing on freelist */
		return BTLOG_RECORDINDEX_NONE;
	} else {
		/* remove the head of the freelist_records */
		btlog_record_t *record = lookup_btrecord(btlog, recindex);
		btlog->freelist_records = record->next;
		return recindex;
	}
}

static void
btlog_add_record_to_freelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
	btlog_recordindex_t precindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *precord = NULL, *record = NULL;

	record = lookup_btrecord(btlog, recindex);

	assert(TAILQ_EMPTY(&record->element_record_queue));

	record->bthash = 0;

	precindex = btlog->head;
	precord = lookup_btrecord(btlog, precindex);

	if (precindex == recindex) {
		btlog->head = precord->next;
		btlog->active_record_count--;

		record->next = btlog->freelist_records;
		btlog->freelist_records = recindex;

		if (btlog->head == BTLOG_RECORDINDEX_NONE) {
			/* active list is now empty, update tail */
			btlog->tail = BTLOG_RECORDINDEX_NONE;
			assert(btlog->active_record_count == 0);
		}
	} else {
		while (precindex != BTLOG_RECORDINDEX_NONE) {
			if (precord->next == recindex) {
				precord->next = record->next;
				btlog->active_record_count--;

				record->next = btlog->freelist_records;
				btlog->freelist_records = recindex;

				if (btlog->tail == recindex) {
					btlog->tail = precindex;
				}
				break;
			} else {
				precindex = precord->next;
				precord = lookup_btrecord(btlog, precindex);
			}
		}
	}
}

/* Assumes btlog is already locked */
static void
btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict)
{
	btlog_recordindex_t recindex = btlog->head;
	btlog_record_t *record = NULL;
	btlog_element_t *recelem = NULL;

	if (recindex == BTLOG_RECORDINDEX_NONE) {
		/* nothing on active list */
		panic("BTLog: Eviction requested on btlog (0x%lx) with an empty active list.\n", (uintptr_t) btlog);
	} else {
		while (num_elements_to_evict) {
			/*
			 * LEAKS: reap the oldest element within the record with the lowest refs.
			 * CORRUPTION: reap the oldest element overall and drop its reference on the record
			 */

			if (btlog->caller_will_remove_entries_for_element) {
				uint32_t max_refs_threshold = UINT32_MAX;
				btlog_recordindex_t precindex = 0, prev_evictindex = 0, evict_index = 0;

				prev_evictindex = evict_index = btlog->head;
				precindex = recindex = btlog->head;

				while (recindex != BTLOG_RECORDINDEX_NONE) {
					record = lookup_btrecord(btlog, recindex);

					if (btlog->activerecord == recindex || record->ref_count > max_refs_threshold) {
						/* skip this record */
					} else {
						prev_evictindex = precindex;
						evict_index = recindex;
						max_refs_threshold = record->ref_count;
					}

					if (record->next != BTLOG_RECORDINDEX_NONE) {
						precindex = recindex;
					}

					recindex = record->next;
				}

				recindex = evict_index;
				assert(recindex != BTLOG_RECORDINDEX_NONE);
				record = lookup_btrecord(btlog, recindex);

				recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
			} else {
				recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
				recindex = recelem->recindex;
				record = lookup_btrecord(btlog, recindex);
			}

			/*
			 * Here we have the element to drop (recelem), its record and the record index.
			 */
			while (recelem && num_elements_to_evict) {
				TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

				if (btlog->caller_will_remove_entries_for_element) {
					btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;
					uint32_t hashidx = 0;

					hashidx = calculate_hashidx_for_element(~recelem->elem, btlog);

					prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
					while (hashelem != NULL) {
						if (hashelem == recelem) {
							break;
						} else {
							prev_hashelem = hashelem;
							hashelem = TAILQ_NEXT(hashelem, element_hash_link);
						}
					}

					if (hashelem == NULL) {
						panic("BTLog: Missing hashelem for element list of record 0x%lx\n", (uintptr_t) record);
					}

					if (prev_hashelem != hashelem) {
						TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
					} else {
						btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
					}
				} else {
					TAILQ_REMOVE(btlog->elem_linkage_un.element_hash_queue, recelem, element_hash_link);
				}

				btlog_add_elem_to_freelist(btlog, recelem);
				btlog->active_element_count--;

				num_elements_to_evict--;

				assert(record->ref_count);

				record->ref_count--;

				if (record->ref_count == 0) {
					btlog_add_record_to_freelist(btlog, recindex);

					/*
					 * LEAKS: All done with this record. Need the next least popular record.
					 * CORRUPTION: We don't care about records. We'll just pick the next oldest element.
					 */
					if (btlog->caller_will_remove_entries_for_element) {
						break;
					}
				}

				if (btlog->caller_will_remove_entries_for_element) {
					recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
				} else {
					recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
					recindex = recelem->recindex;
					record = lookup_btrecord(btlog, recindex);
				}
			}
		}
	}
}

/* Assumes btlog is already locked */
static void
btlog_append_record_to_activelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
	assert(recindex != BTLOG_RECORDINDEX_NONE);

	if (btlog->head == BTLOG_RECORDINDEX_NONE) {
		/* empty active list, update both head and tail */
		btlog->head = btlog->tail = recindex;
	} else {
		btlog_record_t *record = lookup_btrecord(btlog, btlog->tail);
		record->next = recindex;
		btlog->tail = recindex;
	}
	btlog->active_record_count++;
}

btlog_element_t *
btlog_get_elem_from_freelist(btlog_t *btlog)
{
	btlog_element_t *free_elem = NULL;

retry:
	free_elem = btlog->freelist_elements;

	if ((uintptr_t)free_elem == BTLOG_HASHELEMINDEX_NONE) {
		/* nothing on freelist */
		btlog_evict_elements_from_record(btlog, 1);
		goto retry;
	} else {
		/* remove the head of the freelist */
		uintptr_t next_elem = *(uintptr_t*)free_elem;
		btlog->freelist_elements = (btlog_element_t *)next_elem;
		return free_elem;
	}
}

void
btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *elem)
{
	btlog_element_t *free_elem = btlog->freelist_elements;

	TAILQ_NEXT(elem, element_hash_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;
	TAILQ_NEXT(elem, element_record_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;

	*(uintptr_t*)elem = (uintptr_t)free_elem;
	btlog->freelist_elements = elem;
}
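
/*
 * Note on the freelist encoding above: a free element's first word is reused
 * as the link to the next free element, so the freelist needs no extra
 * storage. A minimal sketch of the resulting chain (addresses illustrative):
 *
 *	freelist_elements --> elem A	(first word of A = address of B)
 *	                      elem B	(first word of B = address of C)
 *	                      elem C	(first word of C = BTLOG_HASHELEMINDEX_NONE)
 */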

void
btlog_add_entry(btlog_t *btlog,
    void *element,
    uint8_t operation,
    void *bt[],
    size_t btcount)
{
	btlog_recordindex_t recindex = 0;
	btlog_record_t *record = NULL;
	size_t i;
	u_int32_t md5_buffer[4];
	MD5_CTX btlog_ctx;
	uint32_t hashidx = 0;

	btlog_element_t *hashelem = NULL;

	if (g_crypto_funcs == NULL) {
		return;
	}

	btlog_lock(btlog);

	MD5Init(&btlog_ctx);
	for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
		MD5Update(&btlog_ctx, (u_char *) &bt[i], sizeof(bt[i]));
	}
	MD5Final((u_char *) &md5_buffer, &btlog_ctx);

	recindex = lookup_btrecord_byhash(btlog, md5_buffer[0], bt, btcount);

	if (recindex != BTLOG_RECORDINDEX_NONE) {
		record = lookup_btrecord(btlog, recindex);
		record->ref_count++;
		assert(record->operation == operation);
	} else {
retry:
		/* If there's a free record, use it */
		recindex = btlog_get_record_from_freelist(btlog);
		if (recindex == BTLOG_RECORDINDEX_NONE) {
			/* Use the first active record (FIFO age-out) */
			btlog_evict_elements_from_record(btlog, ((2 * sizeof(btlog_record_t)) / sizeof(btlog_element_t)));
			goto retry;
		}

		record = lookup_btrecord(btlog, recindex);

		/* we always add to the tail, so there is no next pointer */
		record->next = BTLOG_RECORDINDEX_NONE;
		record->operation = operation;
		record->bthash = md5_buffer[0];
		record->ref_count = 1;
		TAILQ_INIT(&record->element_record_queue);

		for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
			record->bt[i] = bt[i];
		}

		for (; i < btlog->btrecord_btdepth; i++) {
			record->bt[i] = NULL;
		}

		btlog_append_record_to_activelist(btlog, recindex);
	}

	btlog->activerecord = recindex;

	hashidx = calculate_hashidx_for_element((uintptr_t)element, btlog);
	hashelem = btlog_get_elem_from_freelist(btlog);

	hashelem->elem = ~((uintptr_t)element);
	hashelem->operation = record->operation;
	hashelem->recindex = recindex;

	TAILQ_INSERT_HEAD(&record->element_record_queue, hashelem, element_record_link);

	if (btlog->caller_will_remove_entries_for_element) {
		TAILQ_NEXT(hashelem, element_hash_link) = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
		btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = hashelem;
	} else {
		TAILQ_INSERT_HEAD(btlog->elem_linkage_un.element_hash_queue, hashelem, element_hash_link);
	}

	btlog->active_element_count++;

	btlog->activerecord = BTLOG_RECORDINDEX_NONE;

	btlog_unlock(btlog);
}
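
/*
 * Summary sketch of one btlog_add_entry() call (hedged restatement of the
 * code above, with hypothetical frames): hashing the frames {f0, f1, f2}
 * with MD5 and keeping word 0 of the digest dedupes identical stacks into a
 * single record, so logging the same call site twice consumes two elements
 * but only one record, whose ref_count becomes 2.
 */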

void
btlog_remove_entries_for_element(btlog_t *btlog,
    void *element)
{
	btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *record = NULL;
	uint32_t hashidx = 0;

	btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;

	if (btlog->caller_will_remove_entries_for_element == FALSE) {
		panic("Explicit removal of entry is not permitted for this btlog (%p).\n", btlog);
	}

	if (g_crypto_funcs == NULL) {
		return;
	}

	btlog_lock(btlog);

	hashidx = calculate_hashidx_for_element((uintptr_t) element, btlog);
	prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];

	while (hashelem != NULL) {
		if (~hashelem->elem == (uintptr_t)element) {
			break;
		} else {
			prev_hashelem = hashelem;
			hashelem = TAILQ_NEXT(hashelem, element_hash_link);
		}
	}

	if (hashelem) {
		btlog_element_t *recelem = NULL;

		if (prev_hashelem != hashelem) {
			TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
		} else {
			btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
		}

		recindex = hashelem->recindex;
		record = lookup_btrecord(btlog, recindex);

		recelem = hashelem;
		TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

		btlog_add_elem_to_freelist(btlog, hashelem);
		btlog->active_element_count--;

		assert(record->ref_count);

		record->ref_count--;

		if (record->ref_count == 0) {
			btlog_add_record_to_freelist(btlog, recindex);
		}
	}

	btlog_unlock(btlog);
}

#if DEBUG || DEVELOPMENT

void
btlog_copy_backtraces_for_elements(btlog_t * btlog,
    uintptr_t * instances,
    uint32_t * countp,
    uint32_t zoneSize,
    leak_site_proc proc,
    void * refCon)
{
	btlog_recordindex_t recindex;
	btlog_record_t * record;
	btlog_element_t * hashelem;
	uint32_t hashidx, idx, dups, numSites, siteCount;
	uintptr_t element, site;
	uint32_t count;

	btlog_lock(btlog);

	count = *countp;
	for (numSites = 0, idx = 0; idx < count; idx++) {
		element = instances[idx];

		if (kInstanceFlagReferenced & element) {
			continue;
		}
		element = INSTANCE_PUT(element) & ~kInstanceFlags;

		site = 0;
		hashidx = calculate_hashidx_for_element(element, btlog);
		hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
		while (hashelem != NULL) {
			if (~hashelem->elem == element) {
				break;
			}
			hashelem = TAILQ_NEXT(hashelem, element_hash_link);
		}
		if (hashelem) {
			recindex = hashelem->recindex;
			site = (uintptr_t) lookup_btrecord(btlog, recindex);
		}

		if (site) {
			element = (site | kInstanceFlagReferenced);
		}
		instances[numSites] = INSTANCE_PUT(element);
		numSites++;
	}

	for (idx = 0; idx < numSites; idx++) {
		site = instances[idx];
		if (!site) {
			continue;
		}
		if (!(kInstanceFlagReferenced & site)) {
			continue;
		}
		for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
			if (instances[dups] == site) {
				siteCount++;
				instances[dups] = 0;
			}
		}
		record = (typeof(record))(INSTANCE_PUT(site) & ~kInstanceFlags);
		(*proc)(refCon, siteCount, zoneSize, (uintptr_t *) &record->bt[0], (uint32_t) btlog->btrecord_btdepth);
	}

	*countp = numSites;

	btlog_unlock(btlog);
}

/*
 * Returns the number of records in the btlog struct.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
size_t
get_btlog_records_count(btlog_t *btlog)
{
	if (btlog->btlog_buffersize < sizeof(btlog_t)) {
		return 0;
	}
	return (btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size;
}

/*
 * Copies out relevant info from btlog_record_t's to zone_btrecord_t's. 'numrecs' points to the number of records
 * the 'records' buffer can hold. Upon return 'numrecs' points to the number of records actually copied out.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
void
get_btlog_records(btlog_t *btlog, zone_btrecord_t *records, unsigned int *numrecs)
{
	unsigned int count, recs_copied, frame;
	zone_btrecord_t *current_rec;
	btlog_record_t *zstack_record;
	btlog_recordindex_t zstack_index = BTLOG_RECORDINDEX_NONE;

	btlog_lock(btlog);

	count = 0;
	if (btlog->btlog_buffersize > sizeof(btlog_t)) {
		count = (unsigned int)((btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size);
	}
	/* Copy out only as many records as the pre-allocated buffer size permits. */
	if (count > *numrecs) {
		count = *numrecs;
	}
	zstack_index = btlog->head;

	current_rec = &records[0];
	recs_copied = 0;
	while (recs_copied < count && (zstack_index != BTLOG_RECORDINDEX_NONE)) {
		zstack_record = lookup_btrecord(btlog, zstack_index);
		current_rec->operation_type = (uint32_t)(zstack_record->operation);
		current_rec->ref_count = zstack_record->ref_count;

		frame = 0;
		while (frame < MIN(btlog->btrecord_btdepth, MAX_ZTRACE_DEPTH)) {
			current_rec->bt[frame] = (uint64_t)VM_KERNEL_UNSLIDE(zstack_record->bt[frame]);
			frame++;
		}

		zstack_index = zstack_record->next;
		recs_copied++;
		current_rec++;
	}
	*numrecs = recs_copied;

	btlog_unlock(btlog);
}

#endif  /* DEBUG || DEVELOPMENT */