/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <kern/btlog.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <mach/vm_param.h>
#define _SYS_TYPES_H_ /* presumably defined to keep <sys/types.h> from being pulled in via the crypto headers below */
#include <libkern/crypto/md5.h>
#include <libkern/crypto/crypto_internal.h>

/*
 * Since all records are located contiguously in memory,
 * we use indices to them as the primary lookup mechanism,
 * and to maintain the linked list of active records
 * in chronological order.
 */
#define BTLOG_MAX_RECORDS       (0xFFFFFF /* 16777215 */)
#define BTLOG_RECORDINDEX_NONE  (0xFFFFFF)

/*
 * Each record is a stack with a reference count and a list of
 * log elements that refer to it.
 *
 * Each log element is placed in a hash bucket that is contained
 * within the btlog structure. It contains the index of the record
 * that it references.
 *
 * So you can go from an address to its corresponding stack by hashing
 * the address, finding the hash head, and traversing the chain of log
 * elements until you find the element whose address matches yours
 * (if it exists), or by creating a new element to hold the new address.
 */

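/*
 * Illustrative sketch (not part of the build) of the address-to-stack walk
 * described above, assuming a LEAKS-mode log; the names mirror the
 * structures and helpers defined below:
 *
 *      uint32_t hashidx = calculate_hashidx_for_element(addr, btlog);
 *      btlog_element_t *e = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
 *      while (e != NULL && ~e->elem != addr)           // elem is stored complemented
 *          e = TAILQ_NEXT(e, element_hash_link);
 *      if (e != NULL) {
 *          btlog_record_t *rec = lookup_btrecord(btlog, e->recindex);
 *          // rec->bt[] now holds the backtrace logged for addr
 *      }
 */
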
#define ELEMENT_HASH_BUCKET_COUNT   (256)
#define BTLOG_HASHELEMINDEX_NONE    BTLOG_RECORDINDEX_NONE

#define ZELEMS_DEFAULT  (8000)
size_t zelems_count = 0;

typedef uint32_t btlog_recordindex_t; /* only 24 bits used */

/*
 * Queue head for the queue of elements connected to a particular record (stack).
 * For quick removal of the oldest element referencing the least popular stack. Useful for LEAKS mode.
 */
TAILQ_HEAD(_element_record_queue, btlog_element);

/*
 * Queue head for the queue of elements that hash to the same bucket.
 * For quick removal of the oldest element ever logged. Useful for CORRUPTION
 * mode, where we use only one bucket, i.e. FIFO.
 */
TAILQ_HEAD(_element_hash_queue, btlog_element);

typedef struct btlog_record {
    btlog_recordindex_t next:24,
        operation:8;
    uint32_t ref_count;
    uint32_t bthash;
    struct _element_record_queue element_record_queue;
    void *bt[]; /* variable sized, based on btlog_t params */
} btlog_record_t;

typedef struct btlog_element {
    btlog_recordindex_t recindex:24,
        operation:8;
    uintptr_t elem;
    TAILQ_ENTRY(btlog_element) element_record_link; /* Links to other elements pointing to the same stack. */

    TAILQ_ENTRY(btlog_element) element_hash_link;   /* Links to other elements in the same hash chain.
                                                     * During LEAKS mode, this is used as a singly-linked list because
                                                     * we don't want to initialize ELEMENT_HASH_BUCKET_COUNT heads.
                                                     *
                                                     * During CORRUPTION mode with a single hash chain, this is used as a doubly-linked list.
                                                     */
} btlog_element_t;

struct btlog {
    vm_address_t btlog_buffer;      /* all memory for this btlog_t */
    vm_size_t btlog_buffersize;

    uintptr_t btrecords;            /* use btlog_recordindex_t to lookup */
    size_t btrecord_btdepth;        /* BT entries per record */
    size_t btrecord_size;

    btlog_recordindex_t head;       /* active record list */
    btlog_recordindex_t tail;
    btlog_recordindex_t activerecord;
    btlog_recordindex_t freelist_records;

    size_t active_record_count;
    size_t active_element_count;
    btlog_element_t *freelist_elements;
    union {
        btlog_element_t **elem_recindex_hashtbl;        /* LEAKS mode: We use an array of ELEMENT_HASH_BUCKET_COUNT buckets. */
        struct _element_hash_queue *element_hash_queue; /* CORRUPTION mode: We use a single hash bucket, i.e. a queue. */
    } elem_linkage_un;

    decl_simple_lock_data(, btlog_lock);
    boolean_t caller_will_remove_entries_for_element;   /* If TRUE, the caller wants to track abandoned/leaked elements
                                                         * and is responsible for explicitly removing them. This flag
                                                         * selects which member of the elem_linkage_un union above is used.
                                                         */
};

extern boolean_t vm_kernel_ready;
extern boolean_t kmem_alloc_ready;

#define lookup_btrecord(btlog, index) \
    ((btlog_record_t *)(btlog->btrecords + index * btlog->btrecord_size))

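/*
 * Worked example (illustrative numbers, not from the source): with
 * btrecord_btdepth = 15 on LP64, btrecord_size works out to
 * sizeof(btlog_record_t) + 15 * sizeof(void *), so record index 3 lives at
 * btrecords + 3 * btrecord_size. No per-record pointers are stored anywhere;
 * the 24-bit index is always converted with this macro.
 */
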
uint32_t calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog);
uint32_t lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount);

void btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *hash_elem);
btlog_element_t* btlog_get_elem_from_freelist(btlog_t *btlog);

uint32_t
lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount)
{
    btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *record = NULL;
    size_t i = 0;
    boolean_t stack_matched = TRUE;

    assert(btcount);
    assert(bt);

    recindex = btlog->head;
    record = lookup_btrecord(btlog, recindex);
    while (recindex != BTLOG_RECORDINDEX_NONE) {
        assert(record->bthash);
        assert(!TAILQ_EMPTY(&record->element_record_queue));
        if (record->bthash == md5_hash) {
            /*
             * Make sure that the incoming stack actually matches the
             * stack in this record. Since we only save off a
             * part of the md5 hash there can be collisions sometimes.
             * This comparison isn't costly because, in case of collisions,
             * usually the first few frames are different.
             */

            stack_matched = TRUE;

            if (btcount < btlog->btrecord_btdepth) {
                if (record->bt[btcount] != NULL) {
                    /*
                     * If the stack depth passed in is smaller than
                     * the recorded stack and we have a valid pointer
                     * in the recorded stack at that depth, then we
                     * don't need to do any further checks.
                     */
                    stack_matched = FALSE;
                    goto next;
                }
            }

            for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
                if (record->bt[i] != bt[i]) {
                    stack_matched = FALSE;
                    goto next;
                }
            }

            if (stack_matched == TRUE) {
                break;
            }
        }
next:
        recindex = record->next;
        record = lookup_btrecord(btlog, recindex);
    }

    return recindex;
}

uint32_t
calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog)
{
    if (btlog->caller_will_remove_entries_for_element) {
        uint32_t addr = 0;

        /* LEAKS mode: use the second byte of the address as the bucket index (0..255) */
        addr = (uint32_t) ((elem & 0xFF00) >> 0x8);

        return addr;
    } else {
        /* CORRUPTION mode: everything goes into the single queue */
        return 0;
    }
}

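/*
 * Worked example (illustrative address): for elem = 0xffffff8012345678,
 * (elem & 0xFF00) >> 8 == 0x56, so the element lands in bucket 86 of the
 * ELEMENT_HASH_BUCKET_COUNT (256) LEAKS-mode buckets. In CORRUPTION mode
 * the index is always 0.
 */
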
static void
btlog_lock(btlog_t *btlog)
{
    simple_lock(&btlog->btlog_lock, LCK_GRP_NULL);
}
static void
btlog_unlock(btlog_t *btlog)
{
    simple_unlock(&btlog->btlog_lock);
}

btlog_t *
btlog_create(size_t numrecords,
    size_t record_btdepth,
    boolean_t caller_will_remove_entries_for_element)
{
    btlog_t *btlog;
    vm_size_t buffersize_needed = 0, elemsize_needed = 0;
    vm_address_t buffer = 0, elem_buffer = 0, elem_hash_buffer = 0;
    size_t i = 0;
    kern_return_t ret;
    size_t btrecord_size = 0;
    uintptr_t free_elem = 0, next_free_elem = 0;

    if (vm_kernel_ready && !kmem_alloc_ready) {
        return NULL;
    }

    if (numrecords > BTLOG_MAX_RECORDS) {
        return NULL;
    }

    if (numrecords == 0) {
        return NULL;
    }

    if (record_btdepth > BTLOG_MAX_DEPTH) {
        return NULL;
    }

    /* btlog_record_t is variable-sized, calculate needs now */
    btrecord_size = sizeof(btlog_record_t)
        + sizeof(void *) * record_btdepth;

    buffersize_needed = sizeof(btlog_t) + numrecords * btrecord_size;
    buffersize_needed = round_page(buffersize_needed);

    if (zelems_count == 0) {
        zelems_count = ((max_mem + (1024 * 1024 * 1024) /*GB*/) >> 30) * ZELEMS_DEFAULT;

        if (PE_parse_boot_argn("zelems", &zelems_count, sizeof(zelems_count)) == TRUE) {
            /*
             * Need a max? With this scheme, it should be possible to tune the default
             * so that we don't need a boot-arg to request more elements.
             */
            printf("Set number of log elements per btlog to: %ld\n", zelems_count);
        }
    }
    elemsize_needed = sizeof(btlog_element_t) * zelems_count;
    elemsize_needed = round_page(elemsize_needed);

    /* since rounding to a page size might hold more, recalculate */
    numrecords = MIN(BTLOG_MAX_RECORDS,
        (buffersize_needed - sizeof(btlog_t)) / btrecord_size);

    if (kmem_alloc_ready) {
        ret = kmem_alloc(kernel_map, &buffer, buffersize_needed, VM_KERN_MEMORY_DIAG);
        if (ret != KERN_SUCCESS) {
            return NULL;
        }

        ret = kmem_alloc(kernel_map, &elem_buffer, elemsize_needed, VM_KERN_MEMORY_DIAG);
        if (ret != KERN_SUCCESS) {
            kmem_free(kernel_map, buffer, buffersize_needed);
            buffer = 0;
            return NULL;
        }

        if (caller_will_remove_entries_for_element == TRUE) {
            ret = kmem_alloc(kernel_map, &elem_hash_buffer, ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
        } else {
            ret = kmem_alloc(kernel_map, &elem_hash_buffer, 2 * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
        }

        if (ret != KERN_SUCCESS) {
            kmem_free(kernel_map, buffer, buffersize_needed);
            buffer = 0;

            kmem_free(kernel_map, elem_buffer, elemsize_needed);
            elem_buffer = 0;
            return NULL;
        }
    } else {
        buffer = (vm_address_t)pmap_steal_memory(buffersize_needed);
        elem_buffer = (vm_address_t)pmap_steal_memory(elemsize_needed);
        if (caller_will_remove_entries_for_element == TRUE) {
            elem_hash_buffer = (vm_address_t)pmap_steal_memory(ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*));
        } else {
            elem_hash_buffer = (vm_address_t)pmap_steal_memory(2 * sizeof(btlog_element_t*));
        }
        ret = KERN_SUCCESS;
    }

    btlog = (btlog_t *)buffer;
    btlog->btlog_buffer = buffer;
    btlog->btlog_buffersize = buffersize_needed;
    btlog->freelist_elements = (btlog_element_t *)elem_buffer;

    simple_lock_init(&btlog->btlog_lock, 0);

    btlog->caller_will_remove_entries_for_element = caller_will_remove_entries_for_element;

    if (caller_will_remove_entries_for_element == TRUE) {
        btlog->elem_linkage_un.elem_recindex_hashtbl = (btlog_element_t **)elem_hash_buffer;
    } else {
        btlog->elem_linkage_un.element_hash_queue = (struct _element_hash_queue*) elem_hash_buffer;
        TAILQ_INIT(btlog->elem_linkage_un.element_hash_queue);
    }

    btlog->btrecords = (uintptr_t)(buffer + sizeof(btlog_t));
    btlog->btrecord_btdepth = record_btdepth;
    btlog->btrecord_size = btrecord_size;

    btlog->head = BTLOG_RECORDINDEX_NONE;
    btlog->tail = BTLOG_RECORDINDEX_NONE;
    btlog->active_record_count = 0;
    btlog->activerecord = BTLOG_RECORDINDEX_NONE;

    if (caller_will_remove_entries_for_element == TRUE) {
        /* Only LEAKS mode has the 256-bucket hash table; in CORRUPTION mode this
         * union member aliases the (already initialized) queue head, and the
         * hash buffer is only two pointers wide. */
        for (i = 0; i < ELEMENT_HASH_BUCKET_COUNT; i++) {
            btlog->elem_linkage_un.elem_recindex_hashtbl[i] = 0;
        }
    }

    /* populate freelist_records with all records in order */
    btlog->freelist_records = 0;
    for (i = 0; i < (numrecords - 1); i++) {
        btlog_record_t *rec = lookup_btrecord(btlog, i);
        rec->next = (btlog_recordindex_t)(i + 1);
    }
    lookup_btrecord(btlog, i)->next = BTLOG_RECORDINDEX_NONE; /* terminate */

    /* populate freelist_elements with all elements in order; the first word
     * of each free element holds the link to the next free element */
    free_elem = (uintptr_t)btlog->freelist_elements;

    for (i = 0; i < (zelems_count - 1); i++) {
        next_free_elem = free_elem + sizeof(btlog_element_t);
        *(uintptr_t*)free_elem = next_free_elem;
        free_elem = next_free_elem;
    }
    /* terminate through free_elem: identical to next_free_elem once the loop
     * has run, and still valid if zelems_count is 1 (loop never runs) */
    *(uintptr_t*)free_elem = BTLOG_HASHELEMINDEX_NONE;

    return btlog;
}

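/*
 * Usage sketch (illustrative, not part of the build; the record and depth
 * counts are made-up values): a subsystem that wants leak tracking, and will
 * therefore call btlog_remove_entries_for_element() itself, might do:
 *
 *      btlog_t *log = btlog_create(16384, 15, TRUE);
 *      if (log != NULL) {
 *          // log is ready for btlog_add_entry() calls
 *      }
 *
 * Passing FALSE instead selects CORRUPTION mode, where old entries age out
 * FIFO and are never removed explicitly.
 */
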
/* Assumes btlog is already locked */
static btlog_recordindex_t
btlog_get_record_from_freelist(btlog_t *btlog)
{
    btlog_recordindex_t recindex = btlog->freelist_records;

    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* nothing on freelist */
        return BTLOG_RECORDINDEX_NONE;
    } else {
        /* remove the head of the freelist_records */
        btlog_record_t *record = lookup_btrecord(btlog, recindex);
        btlog->freelist_records = record->next;
        return recindex;
    }
}

static void
btlog_add_record_to_freelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
    btlog_recordindex_t precindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *precord = NULL, *record = NULL;

    record = lookup_btrecord(btlog, recindex);

    assert(TAILQ_EMPTY(&record->element_record_queue));

    record->bthash = 0;

    precindex = btlog->head;
    precord = lookup_btrecord(btlog, precindex);

    if (precindex == recindex) {
        btlog->head = precord->next;
        btlog->active_record_count--;

        record->next = btlog->freelist_records;
        btlog->freelist_records = recindex;

        if (btlog->head == BTLOG_RECORDINDEX_NONE) {
            /* active list is now empty, update tail */
            btlog->tail = BTLOG_RECORDINDEX_NONE;
            assert(btlog->active_record_count == 0);
        }
    } else {
        while (precindex != BTLOG_RECORDINDEX_NONE) {
            if (precord->next == recindex) {
                precord->next = record->next;
                btlog->active_record_count--;

                record->next = btlog->freelist_records;
                btlog->freelist_records = recindex;

                if (btlog->tail == recindex) {
                    btlog->tail = precindex;
                }
                break;
            } else {
                precindex = precord->next;
                precord = lookup_btrecord(btlog, precindex);
            }
        }
    }
}


/* Assumes btlog is already locked */
static void
btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict)
{
    btlog_recordindex_t recindex = btlog->head;
    btlog_record_t *record = NULL;
    btlog_element_t *recelem = NULL;

    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* nothing on active list */
        panic("BTLog: Eviction requested on btlog (0x%lx) with an empty active list.\n", (uintptr_t) btlog);
    } else {
        while (num_elements_to_evict) {
            /*
             * LEAKS: reap the oldest element within the record with the lowest refs.
             * CORRUPTION: reap the oldest element overall and drop its reference on the record
             */

            if (btlog->caller_will_remove_entries_for_element) {
                uint32_t max_refs_threshold = UINT32_MAX;
                btlog_recordindex_t precindex = 0, prev_evictindex = 0, evict_index = 0;

                prev_evictindex = evict_index = btlog->head;
                precindex = recindex = btlog->head;

                while (recindex != BTLOG_RECORDINDEX_NONE) {
                    record = lookup_btrecord(btlog, recindex);

                    if (btlog->activerecord == recindex || record->ref_count > max_refs_threshold) {
                        /* skip this record */
                    } else {
                        prev_evictindex = precindex;
                        evict_index = recindex;
                        max_refs_threshold = record->ref_count;
                    }

                    if (record->next != BTLOG_RECORDINDEX_NONE) {
                        precindex = recindex;
                    }

                    recindex = record->next;
                }

                recindex = evict_index;
                assert(recindex != BTLOG_RECORDINDEX_NONE);
                record = lookup_btrecord(btlog, recindex);

                recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
            } else {
                recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
                recindex = recelem->recindex;
                record = lookup_btrecord(btlog, recindex);
            }

            /*
             * Here we have the element to drop (recelem), its record and the record index.
             */

            while (recelem && num_elements_to_evict) {
                TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

                if (btlog->caller_will_remove_entries_for_element) {
                    btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;
                    uint32_t hashidx = 0;

                    hashidx = calculate_hashidx_for_element(~recelem->elem, btlog);

                    prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
                    while (hashelem != NULL) {
                        if (hashelem == recelem) {
                            break;
                        } else {
                            prev_hashelem = hashelem;
                            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
                        }
                    }

                    if (hashelem == NULL) {
                        panic("BTLog: Missing hashelem for element list of record 0x%lx\n", (uintptr_t) record);
                    }

                    if (prev_hashelem != hashelem) {
                        TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
                    } else {
                        btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
                    }
                } else {
                    TAILQ_REMOVE(btlog->elem_linkage_un.element_hash_queue, recelem, element_hash_link);
                }

                btlog_add_elem_to_freelist(btlog, recelem);
                btlog->active_element_count--;

                num_elements_to_evict--;

                assert(record->ref_count);

                record->ref_count--;

                if (record->ref_count == 0) {
                    btlog_add_record_to_freelist(btlog, recindex);

                    /*
                     * LEAKS: All done with this record. Need the next least popular record.
                     * CORRUPTION: We don't care about records. We'll just pick the next oldest element.
                     */

                    if (btlog->caller_will_remove_entries_for_element) {
                        break;
                    }
                }

                if (btlog->caller_will_remove_entries_for_element) {
                    recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
                } else {
                    recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
                    if (recelem == NULL) {
                        /* queue drained before the eviction quota was met */
                        break;
                    }
                    recindex = recelem->recindex;
                    record = lookup_btrecord(btlog, recindex);
                }
            }
        }
    }
}

/* Assumes btlog is already locked */
static void
btlog_append_record_to_activelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
    assert(recindex != BTLOG_RECORDINDEX_NONE);

    if (btlog->head == BTLOG_RECORDINDEX_NONE) {
        /* empty active list, update both head and tail */
        btlog->head = btlog->tail = recindex;
    } else {
        btlog_record_t *record = lookup_btrecord(btlog, btlog->tail);
        record->next = recindex;
        btlog->tail = recindex;
    }
    btlog->active_record_count++;
}

btlog_element_t*
btlog_get_elem_from_freelist(btlog_t *btlog)
{
    btlog_element_t *free_elem = NULL;

retry:
    free_elem = btlog->freelist_elements;

    if ((uintptr_t)free_elem == BTLOG_HASHELEMINDEX_NONE) {
        /* nothing on freelist */
        btlog_evict_elements_from_record(btlog, 1);
        goto retry;
    } else {
        /* remove the head of the freelist */
        uintptr_t next_elem = *(uintptr_t*)free_elem;
        btlog->freelist_elements = (btlog_element_t *)next_elem;
        return free_elem;
    }
}

void
btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *elem)
{
    btlog_element_t *free_elem = btlog->freelist_elements;

    TAILQ_NEXT(elem, element_hash_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;
    TAILQ_NEXT(elem, element_record_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;

    /* the first word of a free element doubles as the freelist link */
    *(uintptr_t*)elem = (uintptr_t)free_elem;
    btlog->freelist_elements = elem;
}

void
btlog_add_entry(btlog_t *btlog,
    void *element,
    uint8_t operation,
    void *bt[],
    size_t btcount)
{
    btlog_recordindex_t recindex = 0;
    btlog_record_t *record = NULL;
    size_t i;
    u_int32_t md5_buffer[4];
    MD5_CTX btlog_ctx;
    uint32_t hashidx = 0;

    btlog_element_t *hashelem = NULL;

    if (g_crypto_funcs == NULL) {
        return;
    }

    btlog_lock(btlog);

    MD5Init(&btlog_ctx);
    for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
        MD5Update(&btlog_ctx, (u_char *) &bt[i], sizeof(bt[i]));
    }
    MD5Final((u_char *) &md5_buffer, &btlog_ctx);

    recindex = lookup_btrecord_byhash(btlog, md5_buffer[0], bt, btcount);

    if (recindex != BTLOG_RECORDINDEX_NONE) {
        record = lookup_btrecord(btlog, recindex);
        record->ref_count++;
        assert(record->operation == operation);
    } else {
retry:
        /* If there's a free record, use it */
        recindex = btlog_get_record_from_freelist(btlog);
        if (recindex == BTLOG_RECORDINDEX_NONE) {
            /* None free: age out elements until a record is released, then retry */
            btlog_evict_elements_from_record(btlog, ((2 * sizeof(btlog_record_t)) / sizeof(btlog_element_t)));
            goto retry;
        }

        record = lookup_btrecord(btlog, recindex);

        /* we always add to the tail, so there is no next pointer */
        record->next = BTLOG_RECORDINDEX_NONE;
        record->operation = operation;
        record->bthash = md5_buffer[0];
        record->ref_count = 1;
        TAILQ_INIT(&record->element_record_queue);

        for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
            record->bt[i] = bt[i];
        }

        for (; i < btlog->btrecord_btdepth; i++) {
            record->bt[i] = NULL;
        }

        btlog_append_record_to_activelist(btlog, recindex);
    }

    /* pin this record so that the element eviction possibly triggered by
     * btlog_get_elem_from_freelist() below cannot reap it */
    btlog->activerecord = recindex;

    hashidx = calculate_hashidx_for_element((uintptr_t)element, btlog);
    hashelem = btlog_get_elem_from_freelist(btlog);

    assert(record->bthash);

    hashelem->elem = ~((uintptr_t)element);
    hashelem->operation = record->operation;
    hashelem->recindex = recindex;

    TAILQ_INSERT_HEAD(&record->element_record_queue, hashelem, element_record_link);

    if (btlog->caller_will_remove_entries_for_element) {
        TAILQ_NEXT(hashelem, element_hash_link) = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
        btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = hashelem;
    } else {
        TAILQ_INSERT_HEAD(btlog->elem_linkage_un.element_hash_queue, hashelem, element_hash_link);
    }

    btlog->active_element_count++;

    /* unpin the record now that the element is linked in */
    btlog->activerecord = BTLOG_RECORDINDEX_NONE;

    btlog_unlock(btlog);
}

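/*
 * Usage sketch (illustrative, not part of the build; "log", the operation
 * code, and the frame capture are assumptions): a caller typically records
 * its own return addresses at the instrumented site, e.g.
 *
 *      void *bt[4];
 *      size_t btcount = 0;
 *
 *      bt[btcount++] = __builtin_return_address(0);
 *      btlog_add_entry(log, element, 1, bt, btcount);  // 1 == caller-defined op code
 *
 * Entries that share the same backtrace share one btlog_record_t; only the
 * per-element bookkeeping is duplicated.
 */
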
void
btlog_remove_entries_for_element(btlog_t *btlog,
    void *element)
{
    btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *record = NULL;
    uint32_t hashidx = 0;

    btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;

    if (btlog->caller_will_remove_entries_for_element == FALSE) {
        panic("Explicit removal of entry is not permitted for this btlog (%p).\n", btlog);
    }

    if (g_crypto_funcs == NULL) {
        return;
    }

    btlog_lock(btlog);

    hashidx = calculate_hashidx_for_element((uintptr_t) element, btlog);
    prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];

    while (hashelem != NULL) {
        if (~hashelem->elem == (uintptr_t)element) {
            break;
        } else {
            prev_hashelem = hashelem;
            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
        }
    }

    if (hashelem) {
        btlog_element_t *recelem = NULL;

        if (prev_hashelem != hashelem) {
            TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
        } else {
            btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
        }

        recindex = hashelem->recindex;
        record = lookup_btrecord(btlog, recindex);

        recelem = hashelem;
        TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

        btlog_add_elem_to_freelist(btlog, hashelem);
        btlog->active_element_count--;

        assert(record->ref_count);

        record->ref_count--;

        if (record->ref_count == 0) {
            btlog_add_record_to_freelist(btlog, recindex);
        }
    }

    btlog_unlock(btlog);
}

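/*
 * Usage sketch (illustrative, not part of the build): in LEAKS mode the
 * owner of the log pairs every btlog_add_entry() with a removal when the
 * element is released, so that only leaked elements remain logged:
 *
 *      btlog_remove_entries_for_element(log, element);
 *
 * Calling this on a CORRUPTION-mode log panics, as checked above.
 */
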
#if DEBUG || DEVELOPMENT

void
btlog_copy_backtraces_for_elements(btlog_t * btlog,
    uintptr_t * instances,
    uint32_t * countp,
    uint32_t zoneSize,
    leak_site_proc proc,
    void * refCon)
{
    btlog_recordindex_t recindex;
    btlog_record_t * record;
    btlog_element_t * hashelem;
    uint32_t hashidx, idx, dups, numSites, siteCount;
    uintptr_t element, site;
    uint32_t count;

    btlog_lock(btlog);

    count = *countp;
    for (numSites = 0, idx = 0; idx < count; idx++) {
        element = instances[idx];

        if (kInstanceFlagReferenced & element) {
            continue;
        }
        element = INSTANCE_PUT(element) & ~kInstanceFlags;

        site = 0;
        hashidx = calculate_hashidx_for_element(element, btlog);
        hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
        while (hashelem != NULL) {
            if (~hashelem->elem == element) {
                break;
            }
            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
        }
        if (hashelem) {
            recindex = hashelem->recindex;
            site = (uintptr_t) lookup_btrecord(btlog, recindex);
        }
        if (site) {
            element = (site | kInstanceFlagReferenced);
        }
        instances[numSites] = INSTANCE_PUT(element);
        numSites++;
    }

    for (idx = 0; idx < numSites; idx++) {
        site = instances[idx];
        if (!site) {
            continue;
        }
        if (!(kInstanceFlagReferenced & site)) {
            continue;
        }
        for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
            if (instances[dups] == site) {
                siteCount++;
                instances[dups] = 0;
            }
        }
        record = (typeof(record))(INSTANCE_PUT(site) & ~kInstanceFlags);
        (*proc)(refCon, siteCount, zoneSize, (uintptr_t *) &record->bt[0], (uint32_t) btlog->btrecord_btdepth);
    }

    *countp = numSites;

    btlog_unlock(btlog);
}

/*
 * Returns the number of records in the btlog struct.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
size_t
get_btlog_records_count(btlog_t *btlog)
{
    if (btlog->btlog_buffersize < sizeof(btlog_t)) {
        return 0;
    }
    return (btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size;
}

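/*
 * Worked example (illustrative numbers, not real sizes): with a one-page
 * (4096-byte) buffer, a 256-byte btlog_t header, and btrecord_size == 160,
 * the count above is (4096 - 256) / 160 == 24 records. The same capacity
 * computation appears in get_btlog_records() below.
 */
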
/*
 * Copies out relevant info from btlog_record_t's to zone_btrecord_t's. 'numrecs' points to the number of records
 * the 'records' buffer can hold. Upon return 'numrecs' points to the number of records actually copied out.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
void
get_btlog_records(btlog_t *btlog, zone_btrecord_t *records, unsigned int *numrecs)
{
    unsigned int count, recs_copied, frame;
    zone_btrecord_t *current_rec;
    btlog_record_t *zstack_record;
    btlog_recordindex_t zstack_index = BTLOG_RECORDINDEX_NONE;

    btlog_lock(btlog);

    count = 0;
    if (btlog->btlog_buffersize > sizeof(btlog_t)) {
        count = (unsigned int)((btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size);
    }
    /* Copy out only as many records as the pre-allocated buffer size permits. */
    if (count > *numrecs) {
        count = *numrecs;
    }
    zstack_index = btlog->head;

    current_rec = &records[0];
    recs_copied = 0;
    while (recs_copied < count && (zstack_index != BTLOG_RECORDINDEX_NONE)) {
        zstack_record = lookup_btrecord(btlog, zstack_index);
        current_rec->operation_type = (uint32_t)(zstack_record->operation);
        current_rec->ref_count = zstack_record->ref_count;

        frame = 0;
        while (frame < MIN(btlog->btrecord_btdepth, MAX_ZTRACE_DEPTH)) {
            current_rec->bt[frame] = (uint64_t)VM_KERNEL_UNSLIDE(zstack_record->bt[frame]);
            frame++;
        }

        zstack_index = zstack_record->next;
        recs_copied++;
        current_rec++;
    }
    *numrecs = recs_copied;

    btlog_unlock(btlog);
}

#endif /* DEBUG || DEVELOPMENT */