/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <kern/btlog.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <mach/vm_param.h>
#define _SYS_TYPES_H_ /* pre-define the <sys/types.h> include guard so the crypto headers below don't pull it in */
#include <libkern/crypto/md5.h>
#include <libkern/crypto/crypto_internal.h>

/*
 * Since all records are located contiguously in memory,
 * we use indices to them as the primary lookup mechanism,
 * and to maintain the linked list of active records
 * in chronological order.
 */
#define BTLOG_MAX_RECORDS (0xFFFFFF /* 16777215 */)
#define BTLOG_RECORDINDEX_NONE (0xFFFFFF)

/*
 * Each record is a stack with a reference count and a list of
 * log elements that refer to it.
 *
 * Each log element is placed in a hash bucket that is contained
 * within the btlog structure. It contains the index of the record
 * that it references.
 *
 * So you can go from an address to the corresponding stack by hashing
 * the address, finding the hash head and traversing the chain of log
 * elements until you find the element whose address matches yours (if
 * it exists); otherwise a new element is created to hold the new address.
 */
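
/*
 * Illustrative sketch (editorial, not from the original source): the lookup
 * path described above, using the LEAKS-mode hash table defined below. Note
 * that element addresses are stored complemented (see btlog_add_entry), so
 * lookups compare against ~elem.
 *
 *     uint32_t idx = calculate_hashidx_for_element(addr, btlog);
 *     btlog_element_t *e = btlog->elem_linkage_un.elem_recindex_hashtbl[idx];
 *     while (e != NULL && ~e->elem != addr) {
 *         e = TAILQ_NEXT(e, element_hash_link);
 *     }
 *     if (e != NULL) {
 *         btlog_record_t *rec = lookup_btrecord(btlog, e->recindex);
 *         // rec->bt[] now holds the backtrace logged for addr
 *     }
 */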

#define ELEMENT_HASH_BUCKET_COUNT (256)
#define BTLOG_HASHELEMINDEX_NONE BTLOG_RECORDINDEX_NONE

#define ZELEMS_DEFAULT (8000)
size_t zelems_count = 0;

typedef uint32_t btlog_recordindex_t; /* only 24 bits used */

/*
 * Queue head for the queue of elements connected to a particular record (stack).
 * Allows quick removal of the oldest element referencing the least popular stack. Useful for LEAKS mode.
 */
TAILQ_HEAD(_element_record_queue, btlog_element);

/*
 * Queue head for the queue of elements that hash to the same bucket.
 * Allows quick removal of the oldest element ever logged. Useful for CORRUPTION mode, where we use only one bucket, i.e. FIFO.
 */
TAILQ_HEAD(_element_hash_queue, btlog_element);

typedef struct btlog_record {
    btlog_recordindex_t next:24,
        operation:8;
    uint32_t ref_count;
    uint32_t bthash;
    struct _element_record_queue element_record_queue;
    void *bt[]; /* variable sized, based on btlog_t params */
} btlog_record_t;

typedef struct btlog_element {
    btlog_recordindex_t recindex:24,
        operation:8;
    uintptr_t elem;
    TAILQ_ENTRY(btlog_element) element_record_link; /* Links to other elements pointing to the same stack. */

    TAILQ_ENTRY(btlog_element) element_hash_link;   /* Links to other elements in the same hash chain.
                                                     * During LEAKS mode, this is used as a singly-linked list because
                                                     * we don't want to initialize ELEMENT_HASH_BUCKET_COUNT heads.
                                                     *
                                                     * During CORRUPTION mode with a single hash chain, this is used as a doubly-linked list.
                                                     */
} btlog_element_t;

struct btlog {
    vm_address_t btlog_buffer;  /* all memory for this btlog_t */
    vm_size_t btlog_buffersize;

    uintptr_t btrecords;        /* use btlog_recordindex_t to look up */
    size_t btrecord_btdepth;    /* BT entries per record */
    size_t btrecord_size;

    btlog_recordindex_t head;   /* active record list */
    btlog_recordindex_t tail;
    btlog_recordindex_t activerecord;
    btlog_recordindex_t freelist_records;

    size_t active_record_count;
    size_t active_element_count;
    btlog_element_t *freelist_elements;
    union {
        btlog_element_t **elem_recindex_hashtbl;        /* LEAKS mode: an array of ELEMENT_HASH_BUCKET_COUNT buckets. */
        struct _element_hash_queue *element_hash_queue; /* CORRUPTION mode: a single hash bucket, i.e. a queue. */
    } elem_linkage_un;

    decl_simple_lock_data(, btlog_lock);
    boolean_t caller_will_remove_entries_for_element; /* TRUE means the caller tracks abandoned / leaked elements
                                                       * and takes charge of explicitly removing them. This flag
                                                       * decides which data structure the elem_linkage_un union
                                                       * above uses.
                                                       */
};

extern boolean_t vm_kernel_ready;
extern boolean_t kmem_alloc_ready;

/* Convert a record index into a pointer into the contiguous record array. */
#define lookup_btrecord(btlog, index) \
    ((btlog_record_t *)(btlog->btrecords + index * btlog->btrecord_size))

uint32_t calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog);
uint32_t lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount);

void btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *hash_elem);
btlog_element_t* btlog_get_elem_from_freelist(btlog_t *btlog);

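/*
 * Memory layout (editorial summary of btlog_create below): one allocation
 * holds the btlog_t header immediately followed by all records; a second
 * allocation holds the element pool; a third holds either the
 * ELEMENT_HASH_BUCKET_COUNT bucket heads (LEAKS) or a single queue head
 * (CORRUPTION).
 */
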
uint32_t
lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount)
{
    btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *record = NULL;
    size_t i = 0;
    boolean_t stack_matched = TRUE;

    assert(btcount);
    assert(bt);

    recindex = btlog->head;
    record = lookup_btrecord(btlog, recindex);
    while (recindex != BTLOG_RECORDINDEX_NONE) {
        assert(!TAILQ_EMPTY(&record->element_record_queue));
        if (record->bthash == md5_hash) {
            /*
             * Make sure that the incoming stack actually matches the
             * stack in this record. Since we only save off a
             * part of the md5 hash there can be collisions sometimes.
             * This comparison isn't costly because, in case of collisions,
             * usually the first few frames are different.
             */

            stack_matched = TRUE;

            if (btcount < btlog->btrecord_btdepth) {
                if (record->bt[btcount] != NULL) {
                    /*
                     * If the stack depth passed in is smaller than
                     * the recorded stack and we have a valid pointer
                     * in the recorded stack at that depth, then we
                     * don't need to do any further checks.
                     */
                    stack_matched = FALSE;
                    goto next;
                }
            }

            for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
                if (record->bt[i] != bt[i]) {
                    stack_matched = FALSE;
                    goto next;
                }
            }

            if (stack_matched == TRUE) {
                break;
            }
        }
next:
        recindex = record->next;
        record = lookup_btrecord(btlog, recindex);
    }

    return recindex;
}

/*
 * LEAKS mode hashes on byte 1 (bits 8-15) of the element address, selecting
 * one of ELEMENT_HASH_BUCKET_COUNT (256) buckets; CORRUPTION mode keeps
 * everything in bucket 0.
 */
uint32_t
calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog)
{
    if (btlog->caller_will_remove_entries_for_element) {
        uint32_t addr = 0;

        addr = (uint32_t) ((elem & 0xFF00) >> 0x8);

        return addr;
    } else {
        return 0;
    }
}
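
/*
 * Worked example (editorial): for elem == 0xffffff8012345678,
 * (elem & 0xFF00) >> 8 == 0x56, so the element lands in bucket 86 of 256.
 */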

static void
btlog_lock(btlog_t *btlog)
{
    simple_lock(&btlog->btlog_lock, LCK_GRP_NULL);
}

static void
btlog_unlock(btlog_t *btlog)
{
    simple_unlock(&btlog->btlog_lock);
}

btlog_t *
btlog_create(size_t numrecords,
    size_t record_btdepth,
    boolean_t caller_will_remove_entries_for_element)
{
    btlog_t *btlog;
    vm_size_t buffersize_needed = 0, elemsize_needed = 0;
    vm_address_t buffer = 0, elem_buffer = 0, elem_hash_buffer = 0;
    size_t i = 0;
    kern_return_t ret;
    size_t btrecord_size = 0;
    uintptr_t free_elem = 0, next_free_elem = 0;

    if (vm_kernel_ready && !kmem_alloc_ready) {
        return NULL;
    }

    if (numrecords > BTLOG_MAX_RECORDS) {
        return NULL;
    }

    if (numrecords == 0) {
        return NULL;
    }

    if (record_btdepth > BTLOG_MAX_DEPTH) {
        return NULL;
    }

    /* btlog_record_t is variable-sized, calculate needs now */
    btrecord_size = sizeof(btlog_record_t)
        + sizeof(void *) * record_btdepth;

    buffersize_needed = sizeof(btlog_t) + numrecords * btrecord_size;
    buffersize_needed = round_page(buffersize_needed);

    if (zelems_count == 0) {
        zelems_count = ((max_mem + (1024 * 1024 * 1024) /*GB*/) >> 30) * ZELEMS_DEFAULT;

        if (PE_parse_boot_argn("zelems", &zelems_count, sizeof(zelems_count)) == TRUE) {
            /*
             * Need a max? With this scheme, it should be possible to tune the default
             * so that we don't need a boot-arg to request more elements.
             */
            printf("Set number of log elements per btlog to: %zu\n", zelems_count);
        }
    }
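    /*
     * Worked example (editorial): with 4GB of max_mem, (4GB + 1GB) >> 30 == 5,
     * so the default is 5 * ZELEMS_DEFAULT == 40000 elements, i.e. roughly
     * ZELEMS_DEFAULT elements per GB of memory, rounded up.
     */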
    elemsize_needed = sizeof(btlog_element_t) * zelems_count;
    elemsize_needed = round_page(elemsize_needed);

    /* since rounding to a page size might hold more, recalculate */
    numrecords = MIN(BTLOG_MAX_RECORDS,
        (buffersize_needed - sizeof(btlog_t)) / btrecord_size);

    if (kmem_alloc_ready) {
        ret = kmem_alloc(kernel_map, &buffer, buffersize_needed, VM_KERN_MEMORY_DIAG);
        if (ret != KERN_SUCCESS) {
            return NULL;
        }

        ret = kmem_alloc(kernel_map, &elem_buffer, elemsize_needed, VM_KERN_MEMORY_DIAG);
        if (ret != KERN_SUCCESS) {
            kmem_free(kernel_map, buffer, buffersize_needed);
            buffer = 0;
            return NULL;
        }

        if (caller_will_remove_entries_for_element == TRUE) {
            ret = kmem_alloc(kernel_map, &elem_hash_buffer, ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
        } else {
            ret = kmem_alloc(kernel_map, &elem_hash_buffer, 2 * sizeof(btlog_element_t*), VM_KERN_MEMORY_DIAG);
        }

        if (ret != KERN_SUCCESS) {
            kmem_free(kernel_map, buffer, buffersize_needed);
            buffer = 0;

            kmem_free(kernel_map, elem_buffer, elemsize_needed);
            elem_buffer = 0;
            return NULL;
        }
    } else {
        buffer = (vm_address_t)pmap_steal_memory(buffersize_needed);
        elem_buffer = (vm_address_t)pmap_steal_memory(elemsize_needed);
        if (caller_will_remove_entries_for_element == TRUE) {
            elem_hash_buffer = (vm_address_t)pmap_steal_memory(ELEMENT_HASH_BUCKET_COUNT * sizeof(btlog_element_t*));
        } else {
            elem_hash_buffer = (vm_address_t)pmap_steal_memory(2 * sizeof(btlog_element_t*));
        }
        ret = KERN_SUCCESS;
    }

    btlog = (btlog_t *)buffer;
    btlog->btlog_buffer = buffer;
    btlog->btlog_buffersize = buffersize_needed;
    btlog->freelist_elements = (btlog_element_t *)elem_buffer;

    simple_lock_init(&btlog->btlog_lock, 0);

    btlog->caller_will_remove_entries_for_element = caller_will_remove_entries_for_element;

    if (caller_will_remove_entries_for_element == TRUE) {
        btlog->elem_linkage_un.elem_recindex_hashtbl = (btlog_element_t **)elem_hash_buffer;
    } else {
        btlog->elem_linkage_un.element_hash_queue = (struct _element_hash_queue*) elem_hash_buffer;
        TAILQ_INIT(btlog->elem_linkage_un.element_hash_queue);
    }

    btlog->btrecords = (uintptr_t)(buffer + sizeof(btlog_t));
    btlog->btrecord_btdepth = record_btdepth;
    btlog->btrecord_size = btrecord_size;

    btlog->head = BTLOG_RECORDINDEX_NONE;
    btlog->tail = BTLOG_RECORDINDEX_NONE;
    btlog->active_record_count = 0;
    btlog->activerecord = BTLOG_RECORDINDEX_NONE;

    if (caller_will_remove_entries_for_element == TRUE) {
        /*
         * Editorial fix: only LEAKS mode has the full bucket array. Zeroing
         * it unconditionally (as the original did) would overrun the
         * two-pointer queue-head allocation in CORRUPTION mode and clobber
         * the TAILQ_INIT above.
         */
        for (i = 0; i < ELEMENT_HASH_BUCKET_COUNT; i++) {
            btlog->elem_linkage_un.elem_recindex_hashtbl[i] = 0;
        }
    }

    /* populate freelist_records with all records in order */
    btlog->freelist_records = 0;
    for (i = 0; i < (numrecords - 1); i++) {
        btlog_record_t *rec = lookup_btrecord(btlog, i);
        rec->next = (btlog_recordindex_t)(i + 1);
    }
    lookup_btrecord(btlog, i)->next = BTLOG_RECORDINDEX_NONE; /* terminate */

    /* populate freelist_elements with all elements in order */
    free_elem = (uintptr_t)btlog->freelist_elements;

    for (i = 0; i < (zelems_count - 1); i++) {
        next_free_elem = free_elem + sizeof(btlog_element_t);
        *(uintptr_t*)free_elem = next_free_elem;
        free_elem = next_free_elem;
    }
    /*
     * Editorial fix: terminate through free_elem, which is the last element
     * even when the loop runs zero times (the original wrote through
     * next_free_elem, which is NULL if zelems_count == 1).
     */
    *(uintptr_t*)free_elem = BTLOG_HASHELEMINDEX_NONE;

    return btlog;
}
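
/*
 * Illustrative usage sketch (editorial, not from the original source): a
 * client creates a log and records one entry per tracked operation. The
 * backtrace capture helper and the ZOP_ALLOC operation tag below are
 * stand-ins for whatever the caller actually uses.
 *
 *     void *bt[BTLOG_MAX_DEPTH];
 *     size_t btcount = capture_backtrace(bt, BTLOG_MAX_DEPTH); // hypothetical
 *     btlog_t *log = btlog_create(200, BTLOG_MAX_DEPTH, TRUE); // LEAKS-style
 *     if (log != NULL) {
 *         btlog_add_entry(log, element, ZOP_ALLOC, bt, btcount);
 *         ...
 *         btlog_remove_entries_for_element(log, element);
 *     }
 */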

/* Assumes btlog is already locked */
static btlog_recordindex_t
btlog_get_record_from_freelist(btlog_t *btlog)
{
    btlog_recordindex_t recindex = btlog->freelist_records;

    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* nothing on freelist */
        return BTLOG_RECORDINDEX_NONE;
    } else {
        /* remove the head of the freelist_records */
        btlog_record_t *record = lookup_btrecord(btlog, recindex);
        btlog->freelist_records = record->next;
        return recindex;
    }
}

/* Assumes btlog is already locked */
static void
btlog_add_record_to_freelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
    btlog_recordindex_t precindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *precord = NULL, *record = NULL;

    record = lookup_btrecord(btlog, recindex);

    assert(TAILQ_EMPTY(&record->element_record_queue));

    record->bthash = 0;

    precindex = btlog->head;
    precord = lookup_btrecord(btlog, precindex);

    if (precindex == recindex) {
        btlog->head = precord->next;
        btlog->active_record_count--;

        record->next = btlog->freelist_records;
        btlog->freelist_records = recindex;

        if (btlog->head == BTLOG_RECORDINDEX_NONE) {
            /* active list is now empty, update tail */
            btlog->tail = BTLOG_RECORDINDEX_NONE;
            assert(btlog->active_record_count == 0);
        }
    } else {
        while (precindex != BTLOG_RECORDINDEX_NONE) {
            if (precord->next == recindex) {
                precord->next = record->next;
                btlog->active_record_count--;

                record->next = btlog->freelist_records;
                btlog->freelist_records = recindex;

                if (btlog->tail == recindex) {
                    btlog->tail = precindex;
                }
                break;
            } else {
                precindex = precord->next;
                precord = lookup_btrecord(btlog, precindex);
            }
        }
    }
}
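
/*
 * Eviction summary (editorial): LEAKS mode scans the active list for the
 * record with the lowest ref_count (skipping the record currently being
 * added to) and reaps that record's oldest elements; CORRUPTION mode reaps
 * the globally oldest element from the single FIFO queue.
 */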

/* Assumes btlog is already locked */
static void
btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict)
{
    btlog_recordindex_t recindex = btlog->head;
    btlog_record_t *record = NULL;
    btlog_element_t *recelem = NULL;

    if (recindex == BTLOG_RECORDINDEX_NONE) {
        /* nothing on active list */
        panic("BTLog: Eviction requested on btlog (0x%lx) with an empty active list.\n", (uintptr_t) btlog);
    } else {
        while (num_elements_to_evict) {
            /*
             * LEAKS: reap the oldest element within the record with the fewest refs.
             * CORRUPTION: reap the oldest element overall and drop its reference on the record.
             */

            if (btlog->caller_will_remove_entries_for_element) {
                uint32_t max_refs_threshold = UINT32_MAX;
                btlog_recordindex_t precindex = 0, prev_evictindex = 0, evict_index = 0;

                prev_evictindex = evict_index = btlog->head;
                precindex = recindex = btlog->head;

                while (recindex != BTLOG_RECORDINDEX_NONE) {
                    record = lookup_btrecord(btlog, recindex);

                    if (btlog->activerecord == recindex || record->ref_count > max_refs_threshold) {
                        /* skip this record */
                    } else {
                        prev_evictindex = precindex;
                        evict_index = recindex;
                        max_refs_threshold = record->ref_count;
                    }

                    if (record->next != BTLOG_RECORDINDEX_NONE) {
                        precindex = recindex;
                    }

                    recindex = record->next;
                }

                recindex = evict_index;
                assert(recindex != BTLOG_RECORDINDEX_NONE);
                record = lookup_btrecord(btlog, recindex);

                recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
            } else {
                recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
                recindex = recelem->recindex;
                record = lookup_btrecord(btlog, recindex);
            }

            /*
             * Here we have the element to drop (recelem), its record and the record index.
             */

            while (recelem && num_elements_to_evict) {
                TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

                if (btlog->caller_will_remove_entries_for_element) {
                    btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;
                    uint32_t hashidx = 0;

                    /* elem is stored complemented; undo that to recompute its hash */
                    hashidx = calculate_hashidx_for_element(~recelem->elem, btlog);

                    prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
                    while (hashelem != NULL) {
                        if (hashelem == recelem) {
                            break;
                        } else {
                            prev_hashelem = hashelem;
                            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
                        }
                    }

                    if (hashelem == NULL) {
                        panic("BTLog: Missing hashelem for element list of record 0x%lx\n", (uintptr_t) record);
                    }

                    if (prev_hashelem != hashelem) {
                        TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
                    } else {
                        btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
                    }
                } else {
                    TAILQ_REMOVE(btlog->elem_linkage_un.element_hash_queue, recelem, element_hash_link);
                }

                btlog_add_elem_to_freelist(btlog, recelem);
                btlog->active_element_count--;

                num_elements_to_evict--;

                assert(record->ref_count);

                record->ref_count--;

                if (record->ref_count == 0) {
                    btlog_add_record_to_freelist(btlog, recindex);

                    /*
                     * LEAKS: All done with this record. Need the next least popular record.
                     * CORRUPTION: We don't care about records. We'll just pick the next oldest element.
                     */

                    if (btlog->caller_will_remove_entries_for_element) {
                        break;
                    }
                }

                if (btlog->caller_will_remove_entries_for_element) {
                    recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue);
                } else {
                    recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue);
                    /* editorial guard: the queue may have just been drained */
                    if (recelem != NULL) {
                        recindex = recelem->recindex;
                        record = lookup_btrecord(btlog, recindex);
                    }
                }
            }
        }
    }
}

/* Assumes btlog is already locked */
static void
btlog_append_record_to_activelist(btlog_t *btlog, btlog_recordindex_t recindex)
{
    assert(recindex != BTLOG_RECORDINDEX_NONE);

    if (btlog->head == BTLOG_RECORDINDEX_NONE) {
        /* empty active list, update both head and tail */
        btlog->head = btlog->tail = recindex;
    } else {
        btlog_record_t *record = lookup_btrecord(btlog, btlog->tail);
        record->next = recindex;
        btlog->tail = recindex;
    }
    btlog->active_record_count++;
}

btlog_element_t*
btlog_get_elem_from_freelist(btlog_t *btlog)
{
    btlog_element_t *free_elem = NULL;

retry:
    free_elem = btlog->freelist_elements;

    if ((uintptr_t)free_elem == BTLOG_HASHELEMINDEX_NONE) {
        /* nothing on freelist (the sentinel, not NULL, terminates the list): evict and retry */
        btlog_evict_elements_from_record(btlog, 1);
        goto retry;
    } else {
        /* remove the head of the freelist */
        uintptr_t next_elem = *(uintptr_t*)free_elem;
        btlog->freelist_elements = (btlog_element_t *)next_elem;
        return free_elem;
    }
}

void
btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *elem)
{
    btlog_element_t *free_elem = btlog->freelist_elements;

    TAILQ_NEXT(elem, element_hash_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;
    TAILQ_NEXT(elem, element_record_link) = (btlog_element_t *) BTLOG_HASHELEMINDEX_NONE;

    /* a free element's first word is reused as the freelist link */
    *(uintptr_t*)elem = (uintptr_t)free_elem;
    btlog->freelist_elements = elem;
}
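
/*
 * Freelist encoding sketch (editorial): a free element's first word links to
 * the next free element, so the list looks like
 *
 *     freelist_elements -> [next|....] -> [next|....] -> BTLOG_HASHELEMINDEX_NONE
 *
 * The BTLOG_HASHELEMINDEX_NONE sentinel (not NULL) terminates the chain,
 * which is why btlog_get_elem_from_freelist() compares against it.
 */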
609
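/*
 * Flow summary (editorial): hash the backtrace with MD5; reuse a matching
 * record if one exists (bumping its ref_count), otherwise take a record from
 * the freelist, evicting old elements first if the freelist is empty; then
 * link a fresh element into both the record's element queue and the per-mode
 * hash linkage.
 */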
void
btlog_add_entry(btlog_t *btlog,
    void *element,
    uint8_t operation,
    void *bt[],
    size_t btcount)
{
    btlog_recordindex_t recindex = 0;
    btlog_record_t *record = NULL;
    size_t i;
    u_int32_t md5_buffer[4];
    MD5_CTX btlog_ctx;
    uint32_t hashidx = 0;

    btlog_element_t *hashelem = NULL;

    if (g_crypto_funcs == NULL) {
        return;
    }

    btlog_lock(btlog);

    MD5Init(&btlog_ctx);
    for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
        MD5Update(&btlog_ctx, (u_char *) &bt[i], sizeof(bt[i]));
    }
    MD5Final((u_char *) &md5_buffer, &btlog_ctx);

    recindex = lookup_btrecord_byhash(btlog, md5_buffer[0], bt, btcount);

    if (recindex != BTLOG_RECORDINDEX_NONE) {
        record = lookup_btrecord(btlog, recindex);
        record->ref_count++;
        assert(record->operation == operation);
    } else {
retry:
        /* If there's a free record, use it */
        recindex = btlog_get_record_from_freelist(btlog);
        if (recindex == BTLOG_RECORDINDEX_NONE) {
            /* Use the first active record (FIFO age-out) */
            btlog_evict_elements_from_record(btlog, ((2 * sizeof(btlog_record_t)) / sizeof(btlog_element_t)));
            goto retry;
        }

        record = lookup_btrecord(btlog, recindex);

        /* we always add to the tail, so there is no next pointer */
        record->next = BTLOG_RECORDINDEX_NONE;
        record->operation = operation;
        record->bthash = md5_buffer[0];
        record->ref_count = 1;
        TAILQ_INIT(&record->element_record_queue);

        for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) {
            record->bt[i] = bt[i];
        }

        for (; i < btlog->btrecord_btdepth; i++) {
            record->bt[i] = NULL;
        }

        btlog_append_record_to_activelist(btlog, recindex);
    }

    btlog->activerecord = recindex;

    hashidx = calculate_hashidx_for_element((uintptr_t)element, btlog);
    hashelem = btlog_get_elem_from_freelist(btlog);

    /* store the address complemented; lookups compare against ~elem */
    hashelem->elem = ~((uintptr_t)element);
    hashelem->operation = record->operation;
    hashelem->recindex = recindex;

    TAILQ_INSERT_HEAD(&record->element_record_queue, hashelem, element_record_link);

    if (btlog->caller_will_remove_entries_for_element) {
        TAILQ_NEXT(hashelem, element_hash_link) = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
        btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = hashelem;
    } else {
        TAILQ_INSERT_HEAD(btlog->elem_linkage_un.element_hash_queue, hashelem, element_hash_link);
    }

    btlog->active_element_count++;

    btlog->activerecord = BTLOG_RECORDINDEX_NONE;

    btlog_unlock(btlog);
}

void
btlog_remove_entries_for_element(btlog_t *btlog,
    void *element)
{
    btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE;
    btlog_record_t *record = NULL;
    uint32_t hashidx = 0;

    btlog_element_t *prev_hashelem = NULL, *hashelem = NULL;

    if (btlog->caller_will_remove_entries_for_element == FALSE) {
        panic("Explicit removal of entry is not permitted for this btlog (%p).\n", btlog);
    }

    if (g_crypto_funcs == NULL) {
        return;
    }

    btlog_lock(btlog);

    hashidx = calculate_hashidx_for_element((uintptr_t) element, btlog);
    prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];

    while (hashelem != NULL) {
        if (~hashelem->elem == (uintptr_t)element) {
            break;
        } else {
            prev_hashelem = hashelem;
            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
        }
    }

    if (hashelem) {
        btlog_element_t *recelem = NULL;

        if (prev_hashelem != hashelem) {
            TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link);
        } else {
            btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link);
        }

        recindex = hashelem->recindex;
        record = lookup_btrecord(btlog, recindex);

        recelem = hashelem;
        TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link);

        btlog_add_elem_to_freelist(btlog, hashelem);
        btlog->active_element_count--;

        assert(record->ref_count);

        record->ref_count--;

        if (record->ref_count == 0) {
            btlog_add_record_to_freelist(btlog, recindex);
        }
    }

    btlog_unlock(btlog);
}

#if DEBUG || DEVELOPMENT

void
btlog_copy_backtraces_for_elements(btlog_t * btlog,
    uintptr_t * instances,
    uint32_t * countp,
    uint32_t zoneSize,
    leak_site_proc proc,
    void * refCon)
{
    btlog_recordindex_t recindex;
    btlog_record_t * record;
    btlog_element_t * hashelem;
    uint32_t hashidx, idx, dups, numSites, siteCount;
    uintptr_t element, site;
    uint32_t count;

    btlog_lock(btlog);

    count = *countp;
    for (numSites = 0, idx = 0; idx < count; idx++) {
        element = instances[idx];

        if (kInstanceFlagReferenced & element) {
            continue;
        }
        element = INSTANCE_PUT(element) & ~kInstanceFlags;

        site = 0;
        hashidx = calculate_hashidx_for_element(element, btlog);
        hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx];
        while (hashelem != NULL) {
            if (~hashelem->elem == element) {
                break;
            }
            hashelem = TAILQ_NEXT(hashelem, element_hash_link);
        }
        if (hashelem) {
            recindex = hashelem->recindex;
            site = (uintptr_t) lookup_btrecord(btlog, recindex);
        }
        if (site) {
            element = (site | kInstanceFlagReferenced);
        }
        instances[numSites] = INSTANCE_PUT(element);
        numSites++;
    }

    for (idx = 0; idx < numSites; idx++) {
        site = instances[idx];
        if (!site) {
            continue;
        }
        if (!(kInstanceFlagReferenced & site)) {
            continue;
        }
        for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
            if (instances[dups] == site) {
                siteCount++;
                instances[dups] = 0;
            }
        }
        record = (typeof(record))(INSTANCE_PUT(site) & ~kInstanceFlags);
        (*proc)(refCon, siteCount, zoneSize, (uintptr_t *) &record->bt[0], (uint32_t) btlog->btrecord_btdepth);
    }

    *countp = numSites;

    btlog_unlock(btlog);
}

/*
 * Returns the number of records in the btlog struct.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
size_t
get_btlog_records_count(btlog_t *btlog)
{
    if (btlog->btlog_buffersize < sizeof(btlog_t)) {
        return 0;
    }
    return (btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size;
}

/*
 * Copies out relevant info from btlog_record_t's to zone_btrecord_t's. 'numrecs' points to the number of records
 * the 'records' buffer can hold. Upon return 'numrecs' points to the number of records actually copied out.
 *
 * Called by the mach_zone_get_btlog_records() MIG routine.
 */
void
get_btlog_records(btlog_t *btlog, zone_btrecord_t *records, unsigned int *numrecs)
{
    unsigned int count, recs_copied, frame;
    zone_btrecord_t *current_rec;
    btlog_record_t *zstack_record;
    btlog_recordindex_t zstack_index = BTLOG_RECORDINDEX_NONE;

    btlog_lock(btlog);

    count = 0;
    if (btlog->btlog_buffersize > sizeof(btlog_t)) {
        count = (unsigned int)((btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size);
    }
    /* Copy out only as many records as the pre-allocated buffer size permits. */
    if (count > *numrecs) {
        count = *numrecs;
    }
    zstack_index = btlog->head;

    current_rec = &records[0];
    recs_copied = 0;
    while (recs_copied < count && (zstack_index != BTLOG_RECORDINDEX_NONE)) {
        zstack_record = lookup_btrecord(btlog, zstack_index);
        current_rec->operation_type = (uint32_t)(zstack_record->operation);
        current_rec->ref_count = zstack_record->ref_count;

        frame = 0;
        while (frame < MIN(btlog->btrecord_btdepth, MAX_ZTRACE_DEPTH)) {
            current_rec->bt[frame] = (uint64_t)VM_KERNEL_UNSLIDE(zstack_record->bt[frame]);
            frame++;
        }

        zstack_index = zstack_record->next;
        recs_copied++;
        current_rec++;
    }
    *numrecs = recs_copied;

    btlog_unlock(btlog);
}

#endif /* DEBUG || DEVELOPMENT */