/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/btlog.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>        /* pmap_steal_memory() for pre-kmem boot allocations */
#include <mach/vm_param.h>

#include <libkern/crypto/md5.h>
#include <libkern/crypto/crypto_internal.h>

/*
 * Since all records are located contiguously in memory,
 * we use indices to them as the primary lookup mechanism,
 * and to maintain the linked list of active records
 * in chronological order.
 */
#define BTLOG_MAX_RECORDS       (0xFFFFFF /* 16777215 */)
#define BTLOG_RECORDINDEX_NONE  (0xFFFFFF)

/*
 * Each record is a stack with a reference count and a list of
 * log elements that refer to it.
 *
 * Each log element is placed in a hash bucket that is contained
 * within the btlog structure. It contains the index to the record
 * that it references.
 *
 * So you can go from an address to the corresponding stack by hashing the address,
 * finding the hash head and traversing the chain of log elements
 * till you find the element with an address that matches your
 * address (if it exists) or creating a new element to hold this new address.
 */

#define ELEMENT_HASH_BUCKET_COUNT   (256)
#define BTLOG_HASHELEMINDEX_NONE    BTLOG_RECORDINDEX_NONE

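/*
 * Illustrative sketch (not compiled) of the LEAKS-mode lookup described above,
 * going from an element address to its recorded stack:
 *
 *    hashidx  = calculate_hashidx_for_element(elem, btlog);
 *    hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
 *    while (hashelem != NULL && ~hashelem->elem != elem)
 *        hashelem = TAILQ_NEXT(hashelem, element_hash_link);
 *    // on a hit, hashelem->recindex names the record holding the backtrace
 *
 * Element addresses are stored one's-complemented (see btlog_add_entry below).
 */
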
#define ZELEMS_DEFAULT  (8000)
size_t zelems_count = 0;

typedef uint32_t btlog_recordindex_t; /* only 24 bits used */

/*
 * Queue head for the queue of elements connected to a particular record (stack).
 * For quick removal of the oldest element referencing the least popular stack. Useful for LEAKS mode.
 */
TAILQ_HEAD(_element_record_queue, btlog_element);

/*
 * Queue head for the queue of elements that hash to the same bucket.
 * For quick removal of the oldest element ever logged. Useful for CORRUPTION mode where we use only one bucket, i.e. FIFO.
 */
TAILQ_HEAD(_element_hash_queue, btlog_element);

typedef struct btlog_record {
    btlog_recordindex_t next:24,
        operation:8;
    uint32_t ref_count;
    uint32_t bthash;
    struct _element_record_queue element_record_queue;
    void *bt[]; /* variable sized, based on btlog_t params */
} btlog_record_t;

typedef struct btlog_element {
    btlog_recordindex_t recindex:24,
        operation:8;
    uintptr_t elem;
    TAILQ_ENTRY(btlog_element) element_record_link; /* Links to other elements pointing to the same stack. */

    TAILQ_ENTRY(btlog_element) element_hash_link;   /* Links to other elements in the same hash chain.
                                                     * During LEAKS mode, this is used as a singly-linked list because
                                                     * we don't want to initialize ELEMENT_HASH_BUCKET_COUNT heads.
                                                     *
                                                     * During CORRUPTION mode with a single hash chain, this is used as a doubly-linked list.
                                                     */
} btlog_element_t;

struct btlog {
    vm_address_t btlog_buffer;      /* all memory for this btlog_t */
    vm_size_t btlog_buffersize;

    uintptr_t btrecords;            /* use btlog_recordindex_t to lookup */
    size_t btrecord_btdepth;        /* BT entries per record */
    size_t btrecord_size;

    btlog_recordindex_t head;       /* active record list */
    btlog_recordindex_t tail;
    btlog_recordindex_t activerecord;
    btlog_recordindex_t freelist_records;

    size_t active_record_count;
    size_t active_element_count;
    btlog_element_t *freelist_elements;

    union {
        btlog_element_t **elem_recindex_hashtbl;        /* LEAKS mode: We use an array of ELEMENT_HASH_BUCKET_COUNT buckets. */
        struct _element_hash_queue *element_hash_queue; /* CORRUPTION mode: We use a single hash bucket, i.e. a queue. */
    } elem_linkage_un;

    decl_simple_lock_data(, btlog_lock);
    boolean_t caller_will_remove_entries_for_element; /* If TRUE, this means that the caller is interested in keeping track of abandoned / leaked elements.
                                                       * And so they want to be in charge of explicitly removing elements. Depending on this variable we
                                                       * will choose what kind of data structure to use for the elem_linkage_un union above.
                                                       */
};

extern boolean_t vm_kernel_ready;
extern boolean_t kmem_alloc_ready;

#define lookup_btrecord(btlog, index) \
	((btlog_record_t *)((btlog)->btrecords + (index) * (btlog)->btrecord_size))
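
/*
 * Example: records live back-to-back after the btlog_t header, so with a
 * btrecord_size of, say, 64 bytes, lookup_btrecord(btlog, 3) resolves to
 * btlog->btrecords + 192. No record pointers are stored anywhere, only
 * 24-bit indices.
 */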

uint32_t calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog);
uint32_t lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount);

void btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *hash_elem);
btlog_element_t* btlog_get_elem_from_freelist(btlog_t *btlog);

uint32_t
lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount)
{
    btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *record = NULL;
    size_t i = 0;
    boolean_t stack_matched = TRUE;

    assert(btcount);
    assert(bt);

    recindex = btlog->head;
    record = lookup_btrecord(btlog, recindex);
    while (recindex != BTLOG_RECORDINDEX_NONE) {
        assert(record->bthash);
        assert(!TAILQ_EMPTY(&record->element_record_queue));
        if (record->bthash == md5_hash) {
            /*
             * Make sure that the incoming stack actually matches the
             * stack in this record. Since we only save off a
             * part of the md5 hash there can be collisions sometimes.
             * This comparison isn't costly because, in case of collisions,
             * usually the first few frames are different.
             */

            stack_matched = TRUE;

            if (btcount < btlog->btrecord_btdepth) {
                if (record->bt[btcount] != NULL) {
                    /*
                     * If the stack depth passed in is smaller than
                     * the recorded stack and we have a valid pointer
                     * in the recorded stack at that depth, then we
                     * don't need to do any further checks.
                     */
                    stack_matched = FALSE;
                }
            }

            for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
                if (record->bt[i] != bt[i]) {
                    stack_matched = FALSE;
                    break;
                }
            }

            if (stack_matched == TRUE) {
                break;
            }
        }

        recindex = record->next;
        record = lookup_btrecord(btlog, recindex);
    }

    return recindex;
}

uint32_t
calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog)
{
    if (btlog->caller_will_remove_entries_for_element) {
        uint32_t addr = 0;

        addr = (uint32_t) ((elem & 0xFF00) >> 0x8);

        return addr;
    } else {
        return 0;
    }
}
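
/*
 * LEAKS mode: the bucket is byte 1 of the element address, giving indices
 * 0..255 to match ELEMENT_HASH_BUCKET_COUNT (e.g. elem 0xffffff80ab34cd00 ->
 * (0xcd00 & 0xFF00) >> 8 == 0xcd). CORRUPTION mode keeps a single queue, so
 * everything maps to bucket 0.
 */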

static void
btlog_lock(btlog_t *btlog)
{
    simple_lock(&btlog->btlog_lock, LCK_GRP_NULL);
}

static void
btlog_unlock(btlog_t *btlog)
{
    simple_unlock(&btlog->btlog_lock);
}

btlog_t *
btlog_create(size_t numrecords,
    size_t record_btdepth,
    boolean_t caller_will_remove_entries_for_element)
{
    btlog_t *btlog;
    vm_size_t buffersize_needed = 0, elemsize_needed = 0;
    vm_address_t buffer = 0, elem_buffer = 0, elem_hash_buffer = 0;
    size_t i = 0;
    kern_return_t ret;
    size_t btrecord_size = 0;
    uintptr_t free_elem = 0, next_free_elem = 0;

    if (vm_kernel_ready && !kmem_alloc_ready) {
        return NULL;
    }

    if (numrecords > BTLOG_MAX_RECORDS) {
        return NULL;
    }

    if (numrecords == 0) {
        return NULL;
    }

    if (record_btdepth > BTLOG_MAX_DEPTH) {
        return NULL;
    }

    /* btlog_record_t is variable-sized, calculate needs now */
    btrecord_size = sizeof(btlog_record_t)
        + sizeof(void *) * record_btdepth;

    buffersize_needed = sizeof(btlog_t) + numrecords * btrecord_size;
    buffersize_needed = round_page(buffersize_needed);

    if (zelems_count == 0) {
        zelems_count = ((max_mem + (1024 * 1024 * 1024) /*GB*/) >> 30) * ZELEMS_DEFAULT;

        if (PE_parse_boot_argn("zelems", &zelems_count, sizeof(zelems_count)) == TRUE) {
            /*
             * Need a max? With this scheme, it should be possible to tune the default
             * so that we don't need a boot-arg to request more elements.
             */
            printf("Set number of log elements per btlog to: %ld\n", zelems_count);
        }
    }
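
    /*
     * Example scaling: on a 4GB machine, (4GB + 1GB) >> 30 == 5, so the
     * default is 5 * ZELEMS_DEFAULT == 40000 log elements, unless the
     * "zelems" boot-arg overrides it above.
     */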
    elemsize_needed = sizeof(btlog_element_t) * zelems_count;
    elemsize_needed = round_page(elemsize_needed);

    /* since rounding to a page size might hold more, recalculate */
    numrecords = MIN(BTLOG_MAX_RECORDS,
        (buffersize_needed - sizeof(btlog_t)) / btrecord_size);

    if (kmem_alloc_ready) {
        ret = kmem_alloc(kernel_map, &buffer, buffersize_needed, VM_KERN_MEMORY_DIAG);
        if (ret != KERN_SUCCESS) {
            return NULL;
        }

        ret = kmem_alloc(kernel_map, &elem_buffer, elemsize_needed, VM_KERN_MEMORY_DIAG);
        if (ret != KERN_SUCCESS) {
            kmem_free(kernel_map, buffer, buffersize_needed);
            return NULL;
        }

        if (caller_will_remove_entries_for_element == TRUE) {
            ret = kmem_alloc(kernel_map, &elem_hash_buffer, ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
        } else {
            ret = kmem_alloc(kernel_map, &elem_hash_buffer, 2 * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
        }

        if (ret != KERN_SUCCESS) {
            kmem_free(kernel_map, buffer, buffersize_needed);
            kmem_free(kernel_map, elem_buffer, elemsize_needed);
            return NULL;
        }
    } else {
        buffer = (vm_address_t)pmap_steal_memory(buffersize_needed);
        elem_buffer = (vm_address_t)pmap_steal_memory(elemsize_needed);
        if (caller_will_remove_entries_for_element == TRUE) {
            elem_hash_buffer = (vm_address_t)pmap_steal_memory(ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*));
        } else {
            elem_hash_buffer = (vm_address_t)pmap_steal_memory(2 * sizeof(btlog_element_t*));
        }
    }

    btlog = (btlog_t *)buffer;
    btlog->btlog_buffer = buffer;
    btlog->btlog_buffersize = buffersize_needed;
    btlog->freelist_elements = (btlog_element_t *)elem_buffer;

    simple_lock_init(&btlog->btlog_lock, 0);

    btlog->caller_will_remove_entries_for_element = caller_will_remove_entries_for_element;

    if (caller_will_remove_entries_for_element == TRUE) {
        btlog->elem_linkage_un.elem_recindex_hashtbl = (btlog_element_t **)elem_hash_buffer;
    } else {
        btlog->elem_linkage_un.element_hash_queue = (struct _element_hash_queue *) elem_hash_buffer;
        TAILQ_INIT(btlog->elem_linkage_un.element_hash_queue);
    }

    btlog->btrecords = (uintptr_t)(buffer + sizeof(btlog_t));
    btlog->btrecord_btdepth = record_btdepth;
    btlog->btrecord_size = btrecord_size;

    btlog->head = BTLOG_RECORDINDEX_NONE;
    btlog->tail = BTLOG_RECORDINDEX_NONE;
    btlog->active_record_count = 0;
    btlog->activerecord = BTLOG_RECORDINDEX_NONE;

    /*
     * The bucket array only exists in LEAKS mode; in CORRUPTION mode the
     * union holds the (already initialized) hash queue head instead, so
     * zeroing ELEMENT_HASH_BUCKET_COUNT slots would overrun the smaller
     * allocation and stomp the queue head.
     */
    if (caller_will_remove_entries_for_element == TRUE) {
        for (i = 0; i < ELEMENT_HASH_BUCKET_COUNT; i++) {
            btlog->elem_linkage_un.elem_recindex_hashtbl[i] = 0;
        }
    }

    /* populate freelist_records with all records in order */
    btlog->freelist_records = 0;
    for (i = 0; i < (numrecords - 1); i++) {
        btlog_record_t *rec = lookup_btrecord(btlog, i);
        rec->next = (btlog_recordindex_t)(i + 1);
    }
    lookup_btrecord(btlog, i)->next = BTLOG_RECORDINDEX_NONE; /* terminate */

    /* populate freelist_elements with all elements in order */
    free_elem = (uintptr_t)btlog->freelist_elements;

    for (i = 0; i < (zelems_count - 1); i++) {
        next_free_elem = free_elem + sizeof(btlog_element_t);
        *(uintptr_t*)free_elem = next_free_elem;
        free_elem = next_free_elem;
    }
    *(uintptr_t*)next_free_elem = BTLOG_HASHELEMINDEX_NONE;

    return btlog;
}

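/*
 * Hedged usage sketch (compiled out): how a client such as zone logging might
 * drive this API. The record count, backtrace depth, and operation value are
 * illustrative assumptions, not values taken from any real caller.
 */
#if 0
static void
btlog_usage_example(void *addr, void *bt[], size_t btcount)
{
    /* LEAKS-style log: the caller explicitly retires entries. */
    btlog_t *log = btlog_create(1000 /* records */, 15 /* BT depth */, TRUE);

    if (log != NULL) {
        /* Log 'addr' against its allocation backtrace. */
        btlog_add_entry(log, addr, 1 /* caller-defined operation */, bt, btcount);

        /* ... later, when 'addr' is freed ... */
        btlog_remove_entries_for_element(log, addr);
    }
}
#endif
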
/* Assumes btlog is already locked */
static btlog_recordindex_t
btlog_get_record_from_freelist(btlog_t *btlog)
{
    btlog_recordindex_t recindex = btlog->freelist_records;

    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* nothing on freelist */
        return BTLOG_RECORDINDEX_NONE;
    } else {
        /* remove the head of the freelist_records */
        btlog_record_t *record = lookup_btrecord(btlog, recindex);
        btlog->freelist_records = record->next;
        return recindex;
    }
}

/* Assumes btlog is already locked */
static void
btlog_add_record_to_freelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
    btlog_recordindex_t precindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *precord = NULL, *record = NULL;

    record = lookup_btrecord(btlog, recindex);

    assert(TAILQ_EMPTY(&record->element_record_queue));

    record->bthash = 0;

    precindex = btlog->head;
    precord = lookup_btrecord(btlog, precindex);

    if (precindex == recindex) {
        btlog->head = precord->next;
        btlog->active_record_count--;

        record->next = btlog->freelist_records;
        btlog->freelist_records = recindex;

        if (btlog->head == BTLOG_RECORDINDEX_NONE) {
            /* active list is now empty, update tail */
            btlog->tail = BTLOG_RECORDINDEX_NONE;
            assert(btlog->active_record_count == 0);
        }
    } else {
        while (precindex != BTLOG_RECORDINDEX_NONE) {
            if (precord->next == recindex) {
                precord->next = record->next;
                btlog->active_record_count--;

                record->next = btlog->freelist_records;
                btlog->freelist_records = recindex;

                if (btlog->tail == recindex) {
                    btlog->tail = precindex;
                }
                break;
            } else {
                precindex = precord->next;
                precord = lookup_btrecord(btlog, precindex);
            }
        }
    }
}

/* Assumes btlog is already locked */
static void
btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict)
{
    btlog_recordindex_t recindex = btlog->head;
    btlog_record_t *record = NULL;
    btlog_element_t *recelem = NULL;

    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* nothing on active list */
        panic("BTLog: Eviction requested on btlog (0x%lx) with an empty active list.\n", (uintptr_t) btlog);
    } else {
        while (num_elements_to_evict) {
            /*
             * LEAKS: reap the oldest element within the record with the lowest refs.
             * CORRUPTION: reap the oldest element overall and drop its reference on the record.
             */

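            /*
             * Example: with active records A(refs=3), B(refs=1), C(refs=2),
             * LEAKS mode evicts from B first (fewest references), whereas
             * CORRUPTION mode takes the tail of the single hash queue, i.e.
             * the oldest element logged, regardless of which record it
             * belongs to (elements are inserted at the queue head).
             */
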
            if (btlog->caller_will_remove_entries_for_element) {
                uint32_t max_refs_threshold = UINT32_MAX;
                btlog_recordindex_t precindex = 0, prev_evictindex = 0, evict_index = 0;

                prev_evictindex = evict_index = btlog->head;
                precindex = recindex = btlog->head;

                while (recindex != BTLOG_RECORDINDEX_NONE) {
                    record = lookup_btrecord(btlog, recindex);

                    if (btlog->activerecord == recindex || record->ref_count > max_refs_threshold) {
                        /* skip this record */
                    } else {
                        prev_evictindex = precindex;
                        evict_index = recindex;
                        max_refs_threshold = record->ref_count;
                    }

                    if (record->next != BTLOG_RECORDINDEX_NONE) {
                        precindex = recindex;
                    }

                    recindex = record->next;
                }

                recindex = evict_index;
                assert(recindex != BTLOG_RECORDINDEX_NONE);
                record = lookup_btrecord(btlog, recindex);

                recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
            } else {
                recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
                recindex = recelem->recindex;
                record = lookup_btrecord(btlog, recindex);
            }

            /*
             * Here we have the element to drop (recelem), its record and the record index.
             */

            while (recelem && num_elements_to_evict) {
                TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

                if (btlog->caller_will_remove_entries_for_element) {
                    btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;
                    uint32_t hashidx = 0;

                    hashidx = calculate_hashidx_for_element(~recelem->elem, btlog);

                    prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
                    while (hashelem != NULL) {
                        if (hashelem == recelem) {
                            break;
                        } else {
                            prev_hashelem = hashelem;
                            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
                        }
                    }

                    if (hashelem == NULL) {
                        panic("BTLog: Missing hashelem for element list of record 0x%lx\n", (uintptr_t) record);
                    }

                    if (prev_hashelem != hashelem) {
                        TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
                    } else {
                        btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
                    }
                } else {
                    TAILQ_REMOVE(btlog->elem_linkage_un.element_hash_queue, recelem, element_hash_link);
                }

                btlog_add_elem_to_freelist(btlog, recelem);
                btlog->active_element_count--;

                num_elements_to_evict--;

                assert(record->ref_count);

                record->ref_count--;

                if (record->ref_count == 0) {
                    btlog_add_record_to_freelist(btlog, recindex);

                    /*
                     * LEAKS: All done with this record. Need the next least popular record.
                     * CORRUPTION: We don't care about records. We'll just pick the next oldest element.
                     */

                    if (btlog->caller_will_remove_entries_for_element) {
                        break;
                    }
                }

                if (btlog->caller_will_remove_entries_for_element) {
                    recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
                } else {
                    recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
                    recindex = recelem->recindex;
                    record = lookup_btrecord(btlog, recindex);
                }
            }
        }
    }
}

/* Assumes btlog is already locked */
static void
btlog_append_record_to_activelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
    assert(recindex != BTLOG_RECORDINDEX_NONE);

    if (btlog->head == BTLOG_RECORDINDEX_NONE) {
        /* empty active list, update both head and tail */
        btlog->head = btlog->tail = recindex;
    } else {
        btlog_record_t *record = lookup_btrecord(btlog, btlog->tail);
        record->next = recindex;
        btlog->tail = recindex;
    }
    btlog->active_record_count++;
}

btlog_element_t *
btlog_get_elem_from_freelist(btlog_t *btlog)
{
    btlog_element_t *free_elem = NULL;

retry:
    free_elem = btlog->freelist_elements;

    if ((uintptr_t)free_elem == BTLOG_HASHELEMINDEX_NONE) {
        /* nothing on freelist */
        btlog_evict_elements_from_record(btlog, 1);
        goto retry;
    } else {
        /* remove the head of the freelist */
        uintptr_t next_elem = *(uintptr_t*)free_elem;
        btlog->freelist_elements = (btlog_element_t *)next_elem;
        return free_elem;
    }
}

void
btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *elem)
{
    btlog_element_t *free_elem = btlog->freelist_elements;

    TAILQ_NEXT(elem, element_hash_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;
    TAILQ_NEXT(elem, element_record_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;

    *(uintptr_t*)elem = (uintptr_t)free_elem;
    btlog->freelist_elements = elem;
}

void
btlog_add_entry(btlog_t *btlog,
    void *element,
    uint8_t operation,
    void *bt[],
    size_t btcount)
{
    btlog_recordindex_t recindex = 0;
    btlog_record_t *record = NULL;
    size_t i;
    u_int32_t md5_buffer[4];
    MD5_CTX btlog_ctx;
    uint32_t hashidx = 0;

    btlog_element_t *hashelem = NULL;

    if (g_crypto_funcs == NULL) {
        return;
    }

    btlog_lock(btlog);

    MD5Init(&btlog_ctx);
    for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
        MD5Update(&btlog_ctx, (u_char *) &bt[i], sizeof(bt[i]));
    }
    MD5Final((u_char *) &md5_buffer, &btlog_ctx);

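    /*
     * Only md5_buffer[0] (32 of the 128 digest bits) is kept as the record
     * hash, so collisions are possible; lookup_btrecord_byhash() re-checks
     * the actual frames before declaring a match.
     */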
    recindex = lookup_btrecord_byhash(btlog, md5_buffer[0], bt, btcount);

    if (recindex != BTLOG_RECORDINDEX_NONE) {
        record = lookup_btrecord(btlog, recindex);
        record->ref_count++;
        assert(record->operation == operation);
    } else {
retry:
        /* If there's a free record, use it */
        recindex = btlog_get_record_from_freelist(btlog);
        if (recindex == BTLOG_RECORDINDEX_NONE) {
            /* Use the first active record (FIFO age-out) */
            btlog_evict_elements_from_record(btlog, ((2 * sizeof(btlog_record_t)) / sizeof(btlog_element_t)));
            goto retry;
        }

        record = lookup_btrecord(btlog, recindex);

        /* we always add to the tail, so there is no next pointer */
        record->next = BTLOG_RECORDINDEX_NONE;
        record->operation = operation;
        record->bthash = md5_buffer[0];
        record->ref_count = 1;
        TAILQ_INIT(&record->element_record_queue);

        for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
            record->bt[i] = bt[i];
        }

        for (; i < btlog->btrecord_btdepth; i++) {
            record->bt[i] = NULL;
        }

        btlog_append_record_to_activelist(btlog, recindex);
    }

    btlog->activerecord = recindex;

    hashidx = calculate_hashidx_for_element((uintptr_t)element, btlog);
    hashelem = btlog_get_elem_from_freelist(btlog);

    assert(record->bthash);

    hashelem->elem = ~((uintptr_t)element);
    hashelem->operation = record->operation;
    hashelem->recindex = recindex;

    TAILQ_INSERT_HEAD(&record->element_record_queue, hashelem, element_record_link);

    if (btlog->caller_will_remove_entries_for_element) {
        TAILQ_NEXT(hashelem, element_hash_link) = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
        btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = hashelem;
    } else {
        TAILQ_INSERT_HEAD(btlog->elem_linkage_un.element_hash_queue, hashelem, element_hash_link);
    }

    btlog->active_element_count++;

    btlog->activerecord = BTLOG_RECORDINDEX_NONE;

    btlog_unlock(btlog);
}

void
btlog_remove_entries_for_element(btlog_t *btlog,
    void *element)
{
    btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *record = NULL;
    uint32_t hashidx = 0;

    btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;

    if (btlog->caller_will_remove_entries_for_element == FALSE) {
        panic("Explicit removal of entry is not permitted for this btlog (%p).\n", btlog);
    }

    if (g_crypto_funcs == NULL) {
        return;
    }

    btlog_lock(btlog);

    hashidx = calculate_hashidx_for_element((uintptr_t) element, btlog);
    prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];

    while (hashelem != NULL) {
        if (~hashelem->elem == (uintptr_t)element) {
            break;
        } else {
            prev_hashelem = hashelem;
            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
        }
    }

    if (hashelem) {
        btlog_element_t *recelem = NULL;

        if (prev_hashelem != hashelem) {
            TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
        } else {
            btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
        }

        recindex = hashelem->recindex;
        record = lookup_btrecord(btlog, recindex);

        recelem = hashelem;
        TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

        btlog_add_elem_to_freelist(btlog, hashelem);
        btlog->active_element_count--;

        assert(record->ref_count);

        record->ref_count--;

        if (record->ref_count == 0) {
            btlog_add_record_to_freelist(btlog, recindex);
        }
    }

    btlog_unlock(btlog);
}

#if DEBUG || DEVELOPMENT

void
btlog_copy_backtraces_for_elements(btlog_t      * btlog,
    uintptr_t    * instances,
    uint32_t     * countp,
    uint32_t       zoneSize,
    leak_site_proc proc,
    void         * refCon)
{
    btlog_recordindex_t recindex;
    btlog_record_t     *record;
    btlog_element_t    *hashelem;
    uint32_t            hashidx, idx, dups, numSites, siteCount;
    uintptr_t           element, site;
    uint32_t            count;

    btlog_lock(btlog);

    count = *countp;
    for (numSites = 0, idx = 0; idx < count; idx++) {
        element = instances[idx];

        if (kInstanceFlagReferenced & element) {
            continue;
        }
        element = INSTANCE_PUT(element) & ~kInstanceFlags;

        site = 0;
        hashidx = calculate_hashidx_for_element(element, btlog);
        hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
        while (hashelem != NULL) {
            if (~hashelem->elem == element) {
                break;
            }
            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
        }
        if (hashelem != NULL) {
            recindex = hashelem->recindex;
            site = (uintptr_t) lookup_btrecord(btlog, recindex);
        }
        if (site) {
            element = (site | kInstanceFlagReferenced);
        }
        instances[numSites] = INSTANCE_PUT(element);
        numSites++;
    }

    for (idx = 0; idx < numSites; idx++) {
        site = instances[idx];
        if (!site) {
            continue;
        }
        if (!(kInstanceFlagReferenced & site)) {
            continue;
        }
        for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
            if (instances[dups] == site) {
                siteCount++;
                instances[dups] = 0;
            }
        }
        record = (typeof(record))(INSTANCE_PUT(site) & ~kInstanceFlags);
        (*proc)(refCon, siteCount, zoneSize, (uintptr_t *) &record->bt[0], (uint32_t) btlog->btrecord_btdepth);
    }

    *countp = numSites;

    btlog_unlock(btlog);
}

/*
 * Returns the number of records in the btlog struct.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
size_t
get_btlog_records_count(btlog_t *btlog)
{
    if (btlog->btlog_buffersize < sizeof(btlog_t)) {
        return 0;
    }
    return (btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size;
}

/*
 * Copies out relevant info from btlog_record_t's to zone_btrecord_t's. 'numrecs' points to the number of records
 * the 'records' buffer can hold. Upon return 'numrecs' points to the number of records actually copied out.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
void
get_btlog_records(btlog_t *btlog, zone_btrecord_t *records, unsigned int *numrecs)
{
    unsigned int count, recs_copied, frame;
    zone_btrecord_t *current_rec;
    btlog_record_t *zstack_record;
    btlog_recordindex_t zstack_index = BTLOG_RECORDINDEX_NONE;

    btlog_lock(btlog);

    count = 0;
    if (btlog->btlog_buffersize > sizeof(btlog_t)) {
        count = (unsigned int)((btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size);
    }
    /* Copy out only as many records as the pre-allocated buffer size permits. */
    if (count > *numrecs) {
        count = *numrecs;
    }
    zstack_index = btlog->head;

    current_rec = &records[0];
    recs_copied = 0;
    while (recs_copied < count && (zstack_index != BTLOG_RECORDINDEX_NONE)) {
        zstack_record = lookup_btrecord(btlog, zstack_index);
        current_rec->operation_type = (uint32_t)(zstack_record->operation);
        current_rec->ref_count = zstack_record->ref_count;

        frame = 0;
        while (frame < MIN(btlog->btrecord_btdepth, MAX_ZTRACE_DEPTH)) {
            current_rec->bt[frame] = (uint64_t)VM_KERNEL_UNSLIDE(zstack_record->bt[frame]);
            frame++;
        }

        zstack_index = zstack_record->next;
        recs_copied++;
        current_rec++;
    }
    *numrecs = recs_copied;

    btlog_unlock(btlog);
}

#endif  /* DEBUG || DEVELOPMENT */