/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <kern/btlog.h>
#include <kern/assert.h>
#include <kern/startup.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <mach/vm_param.h>
#define _SYS_TYPES_H_ /* pre-define the <sys/types.h> include guard so the md5 header doesn't pull it in */
#include <libkern/crypto/md5.h>
#include <libkern/crypto/crypto_internal.h>

/*
 * Since all records are located contiguously in memory,
 * we use indices into that buffer as the primary lookup mechanism,
 * and to maintain the linked list of active records
 * in chronological order.
 */
#define BTLOG_MAX_RECORDS (0xFFFFFF /* 16777215 */ )
#define BTLOG_RECORDINDEX_NONE (0xFFFFFF)

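/*
 * Illustrative note (a sketch, not referenced elsewhere in this file):
 * because indices are stored in 24-bit fields, 0xFFFFFF doubles as the
 * "no record" sentinel, so real record indices run 0..BTLOG_MAX_RECORDS-1:
 *
 *	btlog_recordindex_t idx = BTLOG_RECORDINDEX_NONE;	// list terminator
 *	assert(numrecords <= BTLOG_MAX_RECORDS);		// keeps idx distinct
 */
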
/*
 * Each record is a stack with a reference count and a list of
 * log elements that refer to it.
 *
 * Each log element is placed in a hash bucket that is contained
 * within the btlog structure. It contains the index of the record
 * that it references.
 *
 * So you can go from an address to the corresponding stack by hashing
 * the address, finding the hash head and traversing the chain of log
 * elements until you find one whose address matches yours (if it
 * exists); otherwise a new element is created to hold the new address.
 */

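/*
 * A minimal sketch of that LEAKS-mode lookup walk (illustrative only;
 * `addr` is a hypothetical element address — note elements store the
 * address complemented):
 *
 *	uint32_t hidx = calculate_hashidx_for_element(addr, btlog);
 *	btlog_element_t *e = btlog->elem_linkage_un.elem_recindex_hashtbl[hidx];
 *	while (e != NULL && ~e->elem != addr)
 *		e = TAILQ_NEXT(e, element_hash_link);
 *	// e is now the matching element (or NULL); e->recindex names its stack
 */
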
#define ELEMENT_HASH_BUCKET_COUNT (256)
#define BTLOG_HASHELEMINDEX_NONE BTLOG_RECORDINDEX_NONE

#define ZELEMS_DEFAULT (8000)
size_t zelems_count = 0;

typedef uint32_t btlog_recordindex_t; /* only 24 bits used */

/*
 * Queue head for the queue of elements connected to a particular record (stack).
 * For quick removal of the oldest element referencing the least popular stack. Useful for LEAKS mode.
 */
TAILQ_HEAD(_element_record_queue, btlog_element);

/*
 * Queue head for the queue of elements that hash to the same bucket.
 * For quick removal of the oldest element ever logged. Useful for CORRUPTION mode, where we use a single bucket, i.e. FIFO order.
 */
TAILQ_HEAD(_element_hash_queue, btlog_element);

typedef struct btlog_record {
	btlog_recordindex_t next:24,
	    operation:8;
	uint32_t ref_count;
	uint32_t bthash;
	struct _element_record_queue element_record_queue;
	void *bt[]; /* variable sized, based on btlog_t params */
} btlog_record_t;

typedef struct btlog_element {
	btlog_recordindex_t recindex:24,
	    operation:8;
	uintptr_t elem;
	TAILQ_ENTRY(btlog_element) element_record_link; /* Links to other elements pointing to the same stack. */

	TAILQ_ENTRY(btlog_element) element_hash_link;   /* Links to other elements in the same hash chain.
	                                                 * During LEAKS mode, this is used as a singly-linked list because
	                                                 * we don't want to initialize ELEMENT_HASH_BUCKET_COUNT heads.
	                                                 *
	                                                 * During CORRUPTION mode with a single hash chain, this is used as a doubly-linked list.
	                                                 */
} btlog_element_t;

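/*
 * Sketch of the singly-linked use in LEAKS mode (illustrative;
 * `bucket_head` is a hypothetical name). The TAILQ_NEXT slot is written
 * directly, so no per-bucket TAILQ head ever needs initializing:
 *
 *	TAILQ_NEXT(elem, element_hash_link) = bucket_head;	// push front
 *	bucket_head = elem;
 */
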
struct btlog {
	vm_address_t btlog_buffer;      /* all memory for this btlog_t */
	vm_size_t btlog_buffersize;

	uintptr_t btrecords;            /* use btlog_recordindex_t to lookup */
	size_t btrecord_btdepth;        /* BT entries per record */
	size_t btrecord_size;

	btlog_recordindex_t head;       /* active record list */
	btlog_recordindex_t tail;
	btlog_recordindex_t activerecord;
	btlog_recordindex_t freelist_records;

	size_t active_record_count;
	size_t active_element_count;
	btlog_element_t *freelist_elements;
	union {
		btlog_element_t **elem_recindex_hashtbl;        /* LEAKS mode: We use an array of ELEMENT_HASH_BUCKET_COUNT buckets. */
		struct _element_hash_queue *element_hash_queue; /* CORRUPTION mode: We use a single hash bucket, i.e. a queue */
	} elem_linkage_un;

	decl_simple_lock_data(, btlog_lock);
	boolean_t caller_will_remove_entries_for_element;       /* If TRUE, the caller wants to track abandoned / leaked elements
	                                                         * and takes charge of explicitly removing elements. This flag
	                                                         * decides which data structure the elem_linkage_un union above uses.
	                                                         */
};

#define lookup_btrecord(btlog, index) \
	((btlog_record_t *)(btlog->btrecords + index * btlog->btrecord_size))

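/*
 * Records live in one contiguous slab, so the macro above is plain
 * pointer arithmetic. For example (a sketch; LP64 and btrecord_btdepth
 * of 15 are assumed purely for illustration):
 *
 *	btrecord_size = sizeof(btlog_record_t) + 15 * sizeof(void *);
 *	record 3      = (btlog_record_t *)(btlog->btrecords + 3 * btrecord_size);
 */
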
uint32_t calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog);
uint32_t lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount);

void btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *hash_elem);
btlog_element_t* btlog_get_elem_from_freelist(btlog_t *btlog);

uint32_t
lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount)
{
	btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *record = NULL;
	size_t i = 0;
	boolean_t stack_matched = TRUE;

	assert(btcount);
	assert(bt);

	recindex = btlog->head;
	record = lookup_btrecord(btlog, recindex);
	while (recindex != BTLOG_RECORDINDEX_NONE) {
		assert(!TAILQ_EMPTY(&record->element_record_queue));
		if (record->bthash == md5_hash) {
			/*
			 * Make sure that the incoming stack actually matches the
			 * stack in this record. Since we only save off part of the
			 * MD5 hash, collisions are occasionally possible.
			 * This comparison isn't costly because, in case of collisions,
			 * usually the first few frames are different.
			 */

			stack_matched = TRUE;

			if (btcount < btlog->btrecord_btdepth) {
				if (record->bt[btcount] != NULL) {
					/*
					 * If the stack depth passed in is smaller than
					 * the recorded stack and we have a valid pointer
					 * in the recorded stack at that depth, then we
					 * don't need to do any further checks.
					 */
					stack_matched = FALSE;
					goto next;
				}
			}

			for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
				if (record->bt[i] != bt[i]) {
					stack_matched = FALSE;
					goto next;
				}
			}

			if (stack_matched == TRUE) {
				break;
			}
		}
next:
		recindex = record->next;
		record = lookup_btrecord(btlog, recindex);
	}

	return recindex;
}

uint32_t
calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog)
{
	if (btlog->caller_will_remove_entries_for_element) {
		uint32_t addr = 0;

		addr = (uint32_t) ((elem & 0xFF00) >> 0x8);

		return addr;
	} else {
		return 0;
	}
}
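
/*
 * In LEAKS mode the bucket index is simply bits 8-15 of the element
 * address, e.g. (illustrative):
 *
 *	elem   = 0xffffff8012345678
 *	bucket = (elem & 0xFF00) >> 8 = 0x56
 *
 * presumably chosen because the lowest bits of a zone element address
 * are dominated by element alignment. In CORRUPTION mode everything
 * lands in bucket 0, a single FIFO queue.
 */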

static void
btlog_lock(btlog_t *btlog)
{
	simple_lock(&btlog->btlog_lock, LCK_GRP_NULL);
}
static void
btlog_unlock(btlog_t *btlog)
{
	simple_unlock(&btlog->btlog_lock);
}

btlog_t *
btlog_create(size_t numrecords,
    size_t record_btdepth,
    boolean_t caller_will_remove_entries_for_element)
{
	btlog_t *btlog;
	vm_size_t buffersize_needed = 0, elemsize_needed = 0;
	vm_address_t buffer = 0, elem_buffer = 0, elem_hash_buffer = 0;
	size_t i = 0;
	kern_return_t ret;
	size_t btrecord_size = 0;
	uintptr_t free_elem = 0, next_free_elem = 0;

	if (startup_phase >= STARTUP_SUB_VM_KERNEL &&
	    startup_phase < STARTUP_SUB_KMEM_ALLOC) {
		return NULL;
	}

	if (numrecords > BTLOG_MAX_RECORDS) {
		return NULL;
	}

	if (numrecords == 0) {
		return NULL;
	}

	if (record_btdepth > BTLOG_MAX_DEPTH) {
		return NULL;
	}

	/* btlog_record_t is variable-sized, calculate needs now */
	btrecord_size = sizeof(btlog_record_t)
	    + sizeof(void *) * record_btdepth;

	buffersize_needed = sizeof(btlog_t) + numrecords * btrecord_size;
	buffersize_needed = round_page(buffersize_needed);

	if (zelems_count == 0) {
		zelems_count = ((max_mem + (1024 * 1024 * 1024) /*GB*/) >> 30) * ZELEMS_DEFAULT;

		if (PE_parse_boot_argn("zelems", &zelems_count, sizeof(zelems_count)) == TRUE) {
			/*
			 * Need a max? With this scheme, it should be possible to tune the default
			 * so that we don't need a boot-arg to request more elements.
			 */
			printf("Set number of log elements per btlog to: %ld\n", zelems_count);
		}
	}
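
	/*
	 * Worked example (illustrative): on a device with max_mem = 4GB and
	 * no "zelems" boot-arg,
	 *
	 *	zelems_count = ((4GB + 1GB) >> 30) * ZELEMS_DEFAULT
	 *	             = 5 * 8000 = 40000 elements per btlog
	 *
	 * i.e. ZELEMS_DEFAULT for each whole GB of physical memory, plus one.
	 */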
	elemsize_needed = sizeof(btlog_element_t) * zelems_count;
	elemsize_needed = round_page(elemsize_needed);

	/* since rounding up to a page may leave room for more records, recalculate */
	numrecords = MIN(BTLOG_MAX_RECORDS,
	    (buffersize_needed - sizeof(btlog_t)) / btrecord_size);

	if (__probable(startup_phase >= STARTUP_SUB_KMEM_ALLOC)) {
		ret = kmem_alloc(kernel_map, &buffer, buffersize_needed, VM_KERN_MEMORY_DIAG);
		if (ret != KERN_SUCCESS) {
			return NULL;
		}

		ret = kmem_alloc(kernel_map, &elem_buffer, elemsize_needed, VM_KERN_MEMORY_DIAG);
		if (ret != KERN_SUCCESS) {
			kmem_free(kernel_map, buffer, buffersize_needed);
			buffer = 0;
			return NULL;
		}

		if (caller_will_remove_entries_for_element == TRUE) {
			ret = kmem_alloc(kernel_map, &elem_hash_buffer, ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
		} else {
			ret = kmem_alloc(kernel_map, &elem_hash_buffer, 2 * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
		}

		if (ret != KERN_SUCCESS) {
			kmem_free(kernel_map, buffer, buffersize_needed);
			buffer = 0;

			kmem_free(kernel_map, elem_buffer, elemsize_needed);
			elem_buffer = 0;
			return NULL;
		}
	} else {
		buffer = (vm_address_t)pmap_steal_memory(buffersize_needed);
		elem_buffer = (vm_address_t)pmap_steal_memory(elemsize_needed);
		if (caller_will_remove_entries_for_element == TRUE) {
			elem_hash_buffer = (vm_address_t)pmap_steal_memory(ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*));
		} else {
			elem_hash_buffer = (vm_address_t)pmap_steal_memory(2 * sizeof(btlog_element_t*));
		}
		ret = KERN_SUCCESS;
	}

	btlog = (btlog_t *)buffer;
	btlog->btlog_buffer = buffer;
	btlog->btlog_buffersize = buffersize_needed;
	btlog->freelist_elements = (btlog_element_t *)elem_buffer;

	simple_lock_init(&btlog->btlog_lock, 0);

	btlog->caller_will_remove_entries_for_element = caller_will_remove_entries_for_element;

	if (caller_will_remove_entries_for_element == TRUE) {
		btlog->elem_linkage_un.elem_recindex_hashtbl = (btlog_element_t **)elem_hash_buffer;
	} else {
		btlog->elem_linkage_un.element_hash_queue = (struct _element_hash_queue*) elem_hash_buffer;
		TAILQ_INIT(btlog->elem_linkage_un.element_hash_queue);
	}

	btlog->btrecords = (uintptr_t)(buffer + sizeof(btlog_t));
	btlog->btrecord_btdepth = record_btdepth;
	btlog->btrecord_size = btrecord_size;

	btlog->head = BTLOG_RECORDINDEX_NONE;
	btlog->tail = BTLOG_RECORDINDEX_NONE;
	btlog->active_record_count = 0;
	btlog->activerecord = BTLOG_RECORDINDEX_NONE;

	for (i = 0; i < ELEMENT_HASH_BUCKET_COUNT; i++) {
		btlog->elem_linkage_un.elem_recindex_hashtbl[i] = 0;
	}

	/* populate freelist_records with all records in order */
	btlog->freelist_records = 0;
	for (i = 0; i < (numrecords - 1); i++) {
		btlog_record_t *rec = lookup_btrecord(btlog, i);
		rec->next = (btlog_recordindex_t)(i + 1);
	}
	lookup_btrecord(btlog, i)->next = BTLOG_RECORDINDEX_NONE; /* terminate */

	/* populate freelist_elements with all elements in order */
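	/*
	 * Free elements are threaded through their own storage: the first
	 * word of each free btlog_element_t is overlaid with a pointer to
	 * the next free element, and the tail holds the
	 * BTLOG_HASHELEMINDEX_NONE sentinel that
	 * btlog_get_elem_from_freelist() tests against.
	 */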
	free_elem = (uintptr_t)btlog->freelist_elements;

	for (i = 0; i < (zelems_count - 1); i++) {
		next_free_elem = free_elem + sizeof(btlog_element_t);
		*(uintptr_t*)free_elem = next_free_elem;
		free_elem = next_free_elem;
	}
	*(uintptr_t*)next_free_elem = BTLOG_HASHELEMINDEX_NONE;

	return btlog;
}
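
/*
 * Usage sketch (illustrative only; `elem`, `op`, and the record/depth
 * counts are hypothetical — this mirrors how a zone-logging client
 * might drive the API, it is not code from this file):
 *
 *	void *bt[7];
 *	size_t btcount = ...;		// capture a backtrace into bt[]
 *	btlog_t *log = btlog_create(4000, 7, TRUE);
 *	if (log != NULL) {
 *		btlog_add_entry(log, elem, op, bt, btcount);
 *		...
 *		// permitted only because TRUE was passed above (LEAKS mode):
 *		btlog_remove_entries_for_element(log, elem);
 *	}
 */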

/* Assumes btlog is already locked */
static btlog_recordindex_t
btlog_get_record_from_freelist(btlog_t *btlog)
{
	btlog_recordindex_t recindex = btlog->freelist_records;

	if (recindex == BTLOG_RECORDINDEX_NONE) {
		/* nothing on freelist */
		return BTLOG_RECORDINDEX_NONE;
	} else {
		/* remove the head of the freelist_records */
		btlog_record_t *record = lookup_btrecord(btlog, recindex);
		btlog->freelist_records = record->next;
		return recindex;
	}
}

static void
btlog_add_record_to_freelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
	btlog_recordindex_t precindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *precord = NULL, *record = NULL;

	record = lookup_btrecord(btlog, recindex);

	assert(TAILQ_EMPTY(&record->element_record_queue));

	record->bthash = 0;

	precindex = btlog->head;
	precord = lookup_btrecord(btlog, precindex);

	if (precindex == recindex) {
		btlog->head = precord->next;
		btlog->active_record_count--;

		record->next = btlog->freelist_records;
		btlog->freelist_records = recindex;

		if (btlog->head == BTLOG_RECORDINDEX_NONE) {
			/* active list is now empty, update tail */
			btlog->tail = BTLOG_RECORDINDEX_NONE;
			assert(btlog->active_record_count == 0);
		}
	} else {
		while (precindex != BTLOG_RECORDINDEX_NONE) {
			if (precord->next == recindex) {
				precord->next = record->next;
				btlog->active_record_count--;

				record->next = btlog->freelist_records;
				btlog->freelist_records = recindex;

				if (btlog->tail == recindex) {
					btlog->tail = precindex;
				}
				break;
			} else {
				precindex = precord->next;
				precord = lookup_btrecord(btlog, precindex);
			}
		}
	}
}


/* Assumes btlog is already locked */
static void
btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict)
{
	btlog_recordindex_t recindex = btlog->head;
	btlog_record_t *record = NULL;
	btlog_element_t *recelem = NULL;

	if (recindex == BTLOG_RECORDINDEX_NONE) {
		/* nothing on active list */
		panic("BTLog: Eviction requested on btlog (0x%lx) with an empty active list.\n", (uintptr_t) btlog);
	} else {
		while (num_elements_to_evict) {
			/*
			 * LEAKS: reap the oldest element within the record with the lowest refs.
			 * CORRUPTION: reap the oldest element overall and drop its reference on the record
			 */

			if (btlog->caller_will_remove_entries_for_element) {
				uint32_t max_refs_threshold = UINT32_MAX;
				btlog_recordindex_t precindex = 0, prev_evictindex = 0, evict_index = 0;

				prev_evictindex = evict_index = btlog->head;
				precindex = recindex = btlog->head;

				while (recindex != BTLOG_RECORDINDEX_NONE) {
					record = lookup_btrecord(btlog, recindex);

					if (btlog->activerecord == recindex || record->ref_count > max_refs_threshold) {
						/* skip this record */
					} else {
						prev_evictindex = precindex;
						evict_index = recindex;
						max_refs_threshold = record->ref_count;
					}

					if (record->next != BTLOG_RECORDINDEX_NONE) {
						precindex = recindex;
					}

					recindex = record->next;
				}

				recindex = evict_index;
				assert(recindex != BTLOG_RECORDINDEX_NONE);
				record = lookup_btrecord(btlog, recindex);

				recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
			} else {
				recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
				recindex = recelem->recindex;
				record = lookup_btrecord(btlog, recindex);
			}

			/*
			 * Here we have the element to drop (recelem), its record and the record index.
			 */

			while (recelem && num_elements_to_evict) {
				TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

				if (btlog->caller_will_remove_entries_for_element) {
					btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;
					uint32_t hashidx = 0;

					hashidx = calculate_hashidx_for_element(~recelem->elem, btlog);

					prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
					while (hashelem != NULL) {
						if (hashelem == recelem) {
							break;
						} else {
							prev_hashelem = hashelem;
							hashelem = TAILQ_NEXT(hashelem, element_hash_link);
						}
					}

					if (hashelem == NULL) {
						panic("BTLog: Missing hashelem for element list of record 0x%lx\n", (uintptr_t) record);
					}

					if (prev_hashelem != hashelem) {
						TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
					} else {
						btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
					}
				} else {
					TAILQ_REMOVE(btlog->elem_linkage_un.element_hash_queue, recelem, element_hash_link);
				}

				btlog_add_elem_to_freelist(btlog, recelem);
				btlog->active_element_count--;

				num_elements_to_evict--;

				assert(record->ref_count);

				record->ref_count--;

				if (record->ref_count == 0) {
					btlog_add_record_to_freelist(btlog, recindex);

					/*
					 * LEAKS: All done with this record. Need the next least popular record.
					 * CORRUPTION: We don't care about records. We'll just pick the next oldest element.
					 */

					if (btlog->caller_will_remove_entries_for_element) {
						break;
					}
				}

				if (btlog->caller_will_remove_entries_for_element) {
					recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
				} else {
					recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
					recindex = recelem->recindex;
					record = lookup_btrecord(btlog, recindex);
				}
			}
		}
	}
}

/* Assumes btlog is already locked */
static void
btlog_append_record_to_activelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
	assert(recindex != BTLOG_RECORDINDEX_NONE);

	if (btlog->head == BTLOG_RECORDINDEX_NONE) {
		/* empty active list, update both head and tail */
		btlog->head = btlog->tail = recindex;
	} else {
		btlog_record_t *record = lookup_btrecord(btlog, btlog->tail);
		record->next = recindex;
		btlog->tail = recindex;
	}
	btlog->active_record_count++;
}

btlog_element_t*
btlog_get_elem_from_freelist(btlog_t *btlog)
{
	btlog_element_t *free_elem = NULL;

retry:
	free_elem = btlog->freelist_elements;

	if ((uintptr_t)free_elem == BTLOG_HASHELEMINDEX_NONE) {
		/* nothing on freelist */
		btlog_evict_elements_from_record(btlog, 1);
		goto retry;
	} else {
		/* remove the head of the freelist */
		uintptr_t next_elem = *(uintptr_t*)free_elem;
		btlog->freelist_elements = (btlog_element_t *)next_elem;
		return free_elem;
	}
}

void
btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *elem)
{
	btlog_element_t *free_elem = btlog->freelist_elements;

	TAILQ_NEXT(elem, element_hash_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;
	TAILQ_NEXT(elem, element_record_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;

	*(uintptr_t*)elem = (uintptr_t)free_elem;
	btlog->freelist_elements = elem;
}

void
btlog_add_entry(btlog_t *btlog,
    void *element,
    uint8_t operation,
    void *bt[],
    size_t btcount)
{
	btlog_recordindex_t recindex = 0;
	btlog_record_t *record = NULL;
	size_t i;
	u_int32_t md5_buffer[4];
	MD5_CTX btlog_ctx;
	uint32_t hashidx = 0;

	btlog_element_t *hashelem = NULL;

	if (g_crypto_funcs == NULL) {
		return;
	}

	btlog_lock(btlog);

	MD5Init(&btlog_ctx);
	for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
		MD5Update(&btlog_ctx, (u_char *) &bt[i], sizeof(bt[i]));
	}
	MD5Final((u_char *) &md5_buffer, &btlog_ctx);

	recindex = lookup_btrecord_byhash(btlog, md5_buffer[0], bt, btcount);

	if (recindex != BTLOG_RECORDINDEX_NONE) {
		record = lookup_btrecord(btlog, recindex);
		record->ref_count++;
		assert(record->operation == operation);
	} else {
retry:
		/* If there's a free record, use it */
		recindex = btlog_get_record_from_freelist(btlog);
		if (recindex == BTLOG_RECORDINDEX_NONE) {
			/* Use the first active record (FIFO age-out) */
			btlog_evict_elements_from_record(btlog, ((2 * sizeof(btlog_record_t)) / sizeof(btlog_element_t)));
			goto retry;
		}

		record = lookup_btrecord(btlog, recindex);

		/* we always add to the tail, so there is no next pointer */
		record->next = BTLOG_RECORDINDEX_NONE;
		record->operation = operation;
		record->bthash = md5_buffer[0];
		record->ref_count = 1;
		TAILQ_INIT(&record->element_record_queue);

		for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
			record->bt[i] = bt[i];
		}

		for (; i < btlog->btrecord_btdepth; i++) {
			record->bt[i] = NULL;
		}

		btlog_append_record_to_activelist(btlog, recindex);
	}

	btlog->activerecord = recindex;

	hashidx = calculate_hashidx_for_element((uintptr_t)element, btlog);
	hashelem = btlog_get_elem_from_freelist(btlog);

	hashelem->elem = ~((uintptr_t)element);
	hashelem->operation = record->operation;
	hashelem->recindex = recindex;

	TAILQ_INSERT_HEAD(&record->element_record_queue, hashelem, element_record_link);

	if (btlog->caller_will_remove_entries_for_element) {
		TAILQ_NEXT(hashelem, element_hash_link) = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
		btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = hashelem;
	} else {
		TAILQ_INSERT_HEAD(btlog->elem_linkage_un.element_hash_queue, hashelem, element_hash_link);
	}

	btlog->active_element_count++;

	btlog->activerecord = BTLOG_RECORDINDEX_NONE;

	btlog_unlock(btlog);
}

void
btlog_remove_entries_for_element(btlog_t *btlog,
    void *element)
{
	btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
	btlog_record_t *record = NULL;
	uint32_t hashidx = 0;

	btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;

	if (btlog->caller_will_remove_entries_for_element == FALSE) {
		panic("Explicit removal of entry is not permitted for this btlog (%p).\n", btlog);
	}

	if (g_crypto_funcs == NULL) {
		return;
	}

	btlog_lock(btlog);

	hashidx = calculate_hashidx_for_element((uintptr_t) element, btlog);
	prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];

	while (hashelem != NULL) {
		if (~hashelem->elem == (uintptr_t)element) {
			break;
		} else {
			prev_hashelem = hashelem;
			hashelem = TAILQ_NEXT(hashelem, element_hash_link);
		}
	}

	if (hashelem) {
		btlog_element_t *recelem = NULL;

		if (prev_hashelem != hashelem) {
			TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
		} else {
			btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
		}

		recindex = hashelem->recindex;
		record = lookup_btrecord(btlog, recindex);

		recelem = hashelem;
		TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

		btlog_add_elem_to_freelist(btlog, hashelem);
		btlog->active_element_count--;

		assert(record->ref_count);

		record->ref_count--;

		if (record->ref_count == 0) {
			btlog_add_record_to_freelist(btlog, recindex);
		}
	}

	btlog_unlock(btlog);
}

#if DEBUG || DEVELOPMENT

void
btlog_copy_backtraces_for_elements(btlog_t * btlog,
    uintptr_t * instances,
    uint32_t * countp,
    uint32_t zoneSize,
    leak_site_proc proc,
    void * refCon)
{
	btlog_recordindex_t recindex;
	btlog_record_t * record;
	btlog_element_t * hashelem;
	uint32_t hashidx, idx, dups, numSites, siteCount;
	uintptr_t element, site;
	uint32_t count;

	btlog_lock(btlog);

	count = *countp;
	for (numSites = 0, idx = 0; idx < count; idx++) {
		element = instances[idx];

		if (kInstanceFlagReferenced & element) {
			continue;
		}
		element = INSTANCE_PUT(element) & ~kInstanceFlags;

		site = 0;
		hashidx = calculate_hashidx_for_element(element, btlog);
		hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
		while (hashelem != NULL) {
			if (~hashelem->elem == element) {
				break;
			}
			hashelem = TAILQ_NEXT(hashelem, element_hash_link);
		}
		if (hashelem) {
			recindex = hashelem->recindex;
			site = (uintptr_t) lookup_btrecord(btlog, recindex);
		}
		if (site) {
			element = (site | kInstanceFlagReferenced);
		}
		instances[numSites] = INSTANCE_PUT(element);
		numSites++;
	}

	for (idx = 0; idx < numSites; idx++) {
		site = instances[idx];
		if (!site) {
			continue;
		}
		if (!(kInstanceFlagReferenced & site)) {
			continue;
		}
		for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
			if (instances[dups] == site) {
				siteCount++;
				instances[dups] = 0;
			}
		}
		record = (typeof(record))(INSTANCE_PUT(site) & ~kInstanceFlags);
		(*proc)(refCon, siteCount, zoneSize, (uintptr_t *) &record->bt[0], (uint32_t) btlog->btrecord_btdepth);
	}

	*countp = numSites;

	btlog_unlock(btlog);
}

/*
 * Returns the number of records in the btlog struct.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
size_t
get_btlog_records_count(btlog_t *btlog)
{
	if (btlog->btlog_buffersize < sizeof(btlog_t)) {
		return 0;
	}
	return (btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size;
}
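
/*
 * Worked example (illustrative sizes only — the real values depend on
 * configuration): if btlog_buffersize were 16384 bytes, sizeof(btlog_t)
 * 128, and btrecord_size 136, the count would be
 * (16384 - 128) / 136 = 119 records.
 */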

/*
 * Copies out relevant info from btlog_record_t's to zone_btrecord_t's. 'numrecs' points to the number of records
 * the 'records' buffer can hold. Upon return 'numrecs' points to the number of records actually copied out.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
void
get_btlog_records(btlog_t *btlog, zone_btrecord_t *records, unsigned int *numrecs)
{
	unsigned int count, recs_copied, frame;
	zone_btrecord_t *current_rec;
	btlog_record_t *zstack_record;
	btlog_recordindex_t zstack_index = BTLOG_RECORDINDEX_NONE;

	btlog_lock(btlog);

	count = 0;
	if (btlog->btlog_buffersize > sizeof(btlog_t)) {
		count = (unsigned int)((btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size);
	}
	/* Copy out only as many records as the pre-allocated buffer size permits. */
	if (count > *numrecs) {
		count = *numrecs;
	}
	zstack_index = btlog->head;

	current_rec = &records[0];
	recs_copied = 0;
	while (recs_copied < count && (zstack_index != BTLOG_RECORDINDEX_NONE)) {
		zstack_record = lookup_btrecord(btlog, zstack_index);
		current_rec->operation_type = (uint32_t)(zstack_record->operation);
		current_rec->ref_count = zstack_record->ref_count;

		frame = 0;
		while (frame < MIN(btlog->btrecord_btdepth, MAX_ZTRACE_DEPTH)) {
			current_rec->bt[frame] = (uint64_t)VM_KERNEL_UNSLIDE(zstack_record->bt[frame]);
			frame++;
		}

		zstack_index = zstack_record->next;
		recs_copied++;
		current_rec++;
	}
	*numrecs = recs_copied;

	btlog_unlock(btlog);
}
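
/*
 * Caller sketch (illustrative; the actual MIG routine manages its own
 * buffer — names here are hypothetical):
 *
 *	unsigned int n = (unsigned int)get_btlog_records_count(log);
 *	zone_btrecord_t *recs = ...allocate n * sizeof(zone_btrecord_t)...;
 *	get_btlog_records(log, recs, &n);
 *	// n now holds the number of records actually copied out
 */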

#endif /* DEBUG || DEVELOPMENT */