/*
 * gen.subproj/stack_logging.c — Apple Libc-166 (apple/libc.git),
 * retrieved via the git.saurik.com mirror.
 */
1/*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23/* Bertrand from vmutils -> CF -> System */
24
25#import "stack_logging.h"
26
27#import <libc.h>
28#import <pthread.h>
29#import <mach/mach.h>
30#include <mach/vm_statistics.h>
31
32extern void spin_lock(int *);
33
34static inline void *allocate_pages(unsigned bytes) {
35 void *address;
36 if (vm_allocate(mach_task_self(), (vm_address_t *)&address, bytes,
37 VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL)| TRUE)) {
38 address = 0;
39 }
40 return (void *)address;
41}
42
43static inline void deallocate_pages(void *ptr, unsigned bytes) {
44 vm_deallocate(mach_task_self(), (vm_address_t)ptr, bytes);
45}
46
47static inline void copy_pages(const void *source, void *dest, unsigned bytes) {
48 if (vm_copy(mach_task_self(), (vm_address_t)source, bytes, (vm_address_t)dest)) memmove(dest, source, bytes);
49}
50
51/*************** Recording stack ***********/
52
/*
 * Return the frame pointer of our caller's frame, i.e. the starting point
 * for walking the stack.  __builtin_frame_address(1) would be the portable
 * spelling but is disabled here (see the Radar reference below); on ppc we
 * read r1 (the stack pointer, head of the frame chain) directly.
 * Returns NULL on architectures with no implementation.
 */
static void *first_frame_address(void) {
#if 0
    return __builtin_frame_address(1);
#elif defined(__ppc__)
    void *addr;
#warning __builtin_frame_address IS BROKEN IN BEAKER: RADAR #2340421
    /* r1 holds the PowerPC stack pointer; frames are chained through it. */
    __asm__ volatile("mr %0, r1" : "=r" (addr));
    return addr;
#else
#warning first_frame_address WILL NOT BE FUNCTIONAL ON THIS ARCHITECTURE
    return NULL;
#endif
}
66
/*
 * Follow the frame chain one link: load the saved previous-frame pointer
 * stored in the frame at `addr` (offset 0 on i386/ppc/hppa; sparc keeps it
 * at offset 56 and needs its register windows flushed to memory first).
 */
static void *next_frame_address(void *addr) {
    void *ret;
#if defined(__MACH__) && defined(__i386__)
    __asm__ volatile("movl (%1),%0" : "=r" (ret) : "r" (addr));
#elif defined(__MACH__) && defined(__ppc__)
    __asm__ volatile("lwz %0,0x0(%1)" : "=r" (ret) : "r" (addr));
#elif defined(__hpux__)
    __asm__ volatile("ldw 0x0(%1),%0" : "=r" (ret) : "r" (addr));
#elif defined(__svr4__)
    /* ta 0x3: trap that flushes SPARC register windows to the stack. */
    __asm__ volatile("ta 0x3");
    __asm__ volatile("ld [%1 + 56],%0" : "=r" (ret) : "r" (addr));
#else
#error Unknown architecture
#endif
    return ret;
}
83
/*
 * FP_LINK_OFFSET: offset, in words, from a frame pointer to the slot that
 * holds the saved return address for each supported ABI.
 */
#if defined(__i386__) || defined (__m68k__)
#define FP_LINK_OFFSET 1
#elif defined(__ppc__)
#define FP_LINK_OFFSET 2
#elif defined(__hppa__)
#define FP_LINK_OFFSET -5
#elif defined(__sparc__)
#define FP_LINK_OFFSET 14
#else
#error ********** Unimplemented architecture
#endif

/*
 * Walk the current thread's frame-pointer chain and collect up to `max`
 * saved return addresses (hottest first) into `buffer`; *nb receives the
 * number collected.  The 0x800 lower bound rejects obviously bogus frame
 * pointers and terminates the walk.
 */
void thread_stack_pcs(vm_address_t *buffer, unsigned max, unsigned *nb) {
    void *addr;
    addr = first_frame_address();
    *nb = 0;
    while ((addr >= (void *)0x800) && (max--)) {
        /* Address of the saved-return-address slot within this frame. */
        vm_address_t fp_link = (vm_address_t)(((unsigned *)addr)+FP_LINK_OFFSET);
        void *addr2;
        buffer[*nb] = *((vm_address_t *)fp_link);
        (*nb)++;
        addr2 = next_frame_address(addr);
#if defined(__ppc__)
        /* On ppc the chain must grow upward in memory. */
        if ((unsigned)addr2 <= (unsigned)addr) break; // catch bozo frames
#endif
        addr = addr2;
    }
}
112
113/*************** Uniquing stack ***********/
114
115#define MAX_COLLIDE 8
116
117#define MAX_NUM_PC 512
118
119static int enter_pair_in_table(unsigned *table, unsigned numPages, unsigned *uniquedParent, unsigned thisPC) {
120 // uniquedParent is in-out; return 1 is collisions max not exceeded
121 unsigned base = numPages * vm_page_size / (sizeof(int)*2*2);
122 unsigned hash = base + (((*uniquedParent) << 4) ^ (thisPC >> 2)) % (base - 1); // modulo odd number for hashing
123 unsigned collisions = MAX_COLLIDE;
124 while (collisions--) {
125 unsigned *head = table + hash*2;
126 if (! head[0] && !head[1]) {
127 /* end of chain; store this entry! */
128 /* Note that we need to test for both head[0] and head[1] as (0, -1) is a valid entry */
129 head[0] = thisPC;
130 head[1] = *uniquedParent;
131 *uniquedParent = hash;
132 return 1;
133 }
134 if ((head[0] == thisPC) && (head[1] == *uniquedParent)) {
135 /* we found the proper entry, the value for the pair is the entry offset */
136 *uniquedParent = hash;
137 return 1;
138 }
139 hash++;
140 if (hash == base*2) hash = base;
141 }
142 return 0;
143}
144
145unsigned stack_logging_get_unique_stack(unsigned **table, unsigned *table_num_pages, unsigned *stack_entries, unsigned count, unsigned num_hot_to_skip) {
146 unsigned uniquedParent = (unsigned)-1;
147 // we skip the warmest entries that are an artefact of the code
148 while (num_hot_to_skip--) {
149 if (count > 0) { stack_entries++; count--; }
150 }
151 while (count--) {
152 unsigned thisPC = stack_entries[count];
153 while (!enter_pair_in_table(*table, *table_num_pages, &uniquedParent, thisPC)) {
154 unsigned *newTable;
155 unsigned oldBytes = (*table_num_pages) * vm_page_size;
156 newTable = allocate_pages(oldBytes*2);
157 copy_pages(*table, newTable, oldBytes);
158 deallocate_pages(*table, oldBytes);
159 *table_num_pages *= 2;
160 *table = newTable;
161 }
162 }
163 return uniquedParent;
164}
165
166/*************** Logging stack and arguments ***********/
167
/* Head of the in-process log; lazily created by prepare_to_log_stack() and
 * read out of remote tasks by get_remote_records(). */
stack_logging_record_list_t *stack_logging_the_record_list = NULL;

/* Nonzero enables recording; cleared temporarily inside
 * stack_logging_log_stack() to prevent recursive logging. */
int stack_logging_enable_logging = 0;

/* Nonzero disables the alloc-then-dealloc compaction optimization in
 * stack_logging_log_stack(). */
int stack_logging_dontcompact = 0;
173
/*
 * Ensure the record list can hold at least desiredNumRecords records.
 * If not, roughly double the list (plus one page, keeping the size an
 * even number of pages), relocate it, and free the old storage.
 * Returns the (possibly moved) list.
 * Note: overall_num_bytes is bumped *before* the copy, so the relocated
 * list already carries the grown size.
 * NOTE(review): allocate_pages() failure is not checked here — a NULL
 * new_records would crash in copy_pages; confirm OOM policy.
 */
static stack_logging_record_list_t *GrowLogRecords(stack_logging_record_list_t *records, unsigned desiredNumRecords) {
    stack_logging_record_list_t *new_records;
    unsigned old_size = records->overall_num_bytes;
    if (desiredNumRecords*sizeof(stack_logging_record_t)+sizeof(stack_logging_record_list_t) < records->overall_num_bytes) return records;
    records->overall_num_bytes += records->overall_num_bytes + vm_page_size; // in order to always get an even number of pages
    new_records = allocate_pages(records->overall_num_bytes);
    copy_pages(records, new_records, old_size);
    deallocate_pages(records, old_size);
    return new_records;
}
184
185static void prepare_to_log_stack(void) {
186 if (!stack_logging_the_record_list) {
187 unsigned totalSize = 4 * vm_page_size;
188 stack_logging_the_record_list = allocate_pages(totalSize);
189 memset(stack_logging_the_record_list, 0, sizeof(stack_logging_record_list_t));
190 stack_logging_the_record_list->overall_num_bytes = totalSize;
191 stack_logging_the_record_list->uniquing_table_num_pages = 128;
192 stack_logging_the_record_list->uniquing_table = allocate_pages(stack_logging_the_record_list->uniquing_table_num_pages * vm_page_size);
193 }
194}
195
196void stack_logging_log_stack(unsigned type, unsigned arg1, unsigned arg2, unsigned arg3, unsigned result, unsigned num_hot_to_skip) {
197 stack_logging_record_t *rec;
198 if (!stack_logging_enable_logging) return;
199 // printf("stack_logging_log_stack 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
200 if (type & stack_logging_flag_zone) {
201 // just process it now and be done with it!
202 arg1 = arg2; arg2 = arg3; arg3 = 0; type &= ~stack_logging_flag_zone;
203 }
204 if (type & stack_logging_flag_calloc) {
205 // just process it now and be done with it!
206 arg1 *= arg2; arg2 = arg3; arg3 = 0; type &= ~stack_logging_flag_calloc;
207 }
208 if (type & stack_logging_flag_object) {
209 unsigned *class = (unsigned *)arg1;
210 arg1 = arg2 + class[5]; // corresponds to the instance_size field
211 arg2 = 0; arg3 = 0; type = stack_logging_type_alloc;
212 }
213 if (type & stack_logging_flag_cleared) {
214 type &= ~stack_logging_flag_cleared;
215 }
216 if (type & stack_logging_flag_handle) {
217 if (stack_logging_type_alloc) {
218 if (!result) return;
219 stack_logging_log_stack(stack_logging_type_alloc, 0, 0, 0, result, num_hot_to_skip+1);
220 stack_logging_log_stack(stack_logging_type_alloc, arg1, 0, 0, *((int *)result), num_hot_to_skip+1);
221 return;
222 }
223 if (stack_logging_type_dealloc) {
224 if (!arg1) return;
225 stack_logging_log_stack(stack_logging_type_dealloc, *((int *)arg1), 0, 0, 0, num_hot_to_skip+1);
226 stack_logging_log_stack(stack_logging_type_dealloc, arg1, 0, 0, 0, num_hot_to_skip+1);
227 return;
228 }
229 printf("*** Unknown logging type: 0x%x\n", type);
230 }
231 if (type == stack_logging_flag_set_handle_size) {
232 if (!arg1) return;
233 // Thanks to a horrible hack, arg3 contains the prvious handle value
234 if (arg3 == *((int *)arg1)) return;
235 stack_logging_log_stack(stack_logging_type_dealloc, arg3, 0, 0, 0, num_hot_to_skip+1);
236 stack_logging_log_stack(stack_logging_type_alloc, arg2, 0, 0, *((int *)arg1), num_hot_to_skip+1);
237 return;
238 }
239 if (type == (stack_logging_type_dealloc|stack_logging_type_alloc)) {
240 if (arg1 == result) return; // realloc had no effect, skipping
241 if (!arg1) {
242 // realloc(NULL, size) same as malloc(size)
243 type = stack_logging_type_alloc; arg1 = arg2; arg2 = arg3; arg3 = 0;
244 } else {
245 // realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
246 stack_logging_log_stack(stack_logging_type_dealloc, arg1, 0, 0, 0, num_hot_to_skip+1);
247 stack_logging_log_stack(stack_logging_type_alloc, arg2, 0, 0, result, num_hot_to_skip+1);
248 return;
249 }
250 }
251 if (type == stack_logging_type_dealloc) {
252 // simple free
253 if (!arg1) return; // free(nil)
254 }
255 prepare_to_log_stack();
256 spin_lock(&stack_logging_the_record_list->lock);
257 stack_logging_enable_logging = 0;
258 stack_logging_the_record_list = GrowLogRecords(stack_logging_the_record_list, stack_logging_the_record_list->num_records + 1);
259 rec = stack_logging_the_record_list->records + stack_logging_the_record_list->num_records;
260 // We take care of the common case of alloc-dealloc
261 if (!stack_logging_dontcompact && stack_logging_the_record_list->num_records && (type == stack_logging_type_dealloc) && arg1 && ((rec-1)->type == stack_logging_type_alloc) && (arg1 == STACK_LOGGING_DISGUISE((rec-1)->address))) {
262 stack_logging_the_record_list->num_records--;
263 // printf("Erased previous record in alloc-dealloc sequence\n");
264 } else {
265 unsigned stack_entries[MAX_NUM_PC];
266 unsigned count = 0;
267 rec->type = type;
268 if (type == stack_logging_type_dealloc) {
269 rec->argument = 0;
270 rec->address = STACK_LOGGING_DISGUISE(arg1); // we disguise the address
271 } else if (type == stack_logging_type_alloc) {
272 rec->argument = arg1;
273 rec->address = STACK_LOGGING_DISGUISE(result); // we disguise the address
274 } else {
275 rec->argument = arg2;
276 rec->address = STACK_LOGGING_DISGUISE(arg1); // we disguise the address
277 }
278 // printf("Before getting samples 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
279 thread_stack_pcs(stack_entries, MAX_NUM_PC - 1, &count);
280 // We put at the bottom of the stack a marker that denotes the thread (+1 for good measure...)
281 stack_entries[count++] = (int)pthread_self() + 1;
282 /* now let's unique the sample */
283 // printf("Uniquing 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
284 rec->uniqued_stack = stack_logging_get_unique_stack(&stack_logging_the_record_list->uniquing_table, &stack_logging_the_record_list->uniquing_table_num_pages, stack_entries, count, num_hot_to_skip+2); // we additionally skip the warmest 2 entries that are an artefact of the code
285 stack_logging_the_record_list->num_records++;
286 }
287 stack_logging_enable_logging = 1;
288 stack_logging_the_record_list->lock = 0;
289}
290
291static kern_return_t default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
292 *ptr = (void *)address;
293 return 0;
294}
295
/*
 * Fetch the remote task's record list into *records using `reader` to map
 * remote memory: read the global list pointer, then the list header (to
 * learn overall_num_bytes), then re-read the entire list.  Leaves
 * *records NULL (returning 0) when the remote task has no records yet.
 * NOTE(review): this reads &stack_logging_the_record_list, i.e. it assumes
 * the global lives at the same address in the remote task — confirm that
 * callers only target tasks sharing this library's layout.
 */
static kern_return_t get_remote_records(task_t task, memory_reader_t reader, stack_logging_record_list_t **records) {
    // sets records
    vm_address_t *remote_records_address_ref;
    kern_return_t err;
    *records = NULL;
    err = reader(task, (vm_address_t)&stack_logging_the_record_list, sizeof(vm_address_t), (void **)&remote_records_address_ref);
    if (err) return err;
    if (!*remote_records_address_ref) {
        // remote task has no stack records
        return 0;
    }
    err = reader(task, *remote_records_address_ref, sizeof(stack_logging_record_list_t), (void **)records); // get the list head
    if (err) return err;
    // now that overall_num_bytes is known, map the whole list
    return reader(task, *remote_records_address_ref, (*records)->overall_num_bytes, (void **)records);
}
314
315kern_return_t stack_logging_get_frames(task_t task, memory_reader_t reader, vm_address_t address, vm_address_t *stack_frames_buffer, unsigned max_stack_frames, unsigned *num_frames) {
316 stack_logging_record_list_t *records;
317 kern_return_t err;
318 unsigned index;
319 unsigned disguised = STACK_LOGGING_DISGUISE(address);
320 if (!reader) reader = default_reader;
321 *num_frames = 0;
322 err = get_remote_records(task, reader, &records);
323 if (err || !records) return err;
324 // printf("stack_logging: %d records\n", records->num_records);
325 index = 0;
326 while (index < records->num_records) {
327 stack_logging_record_t *record = records->records + index;
328 if (record->address == disguised) {
329 return stack_logging_frames_for_uniqued_stack(task, reader, record->uniqued_stack, stack_frames_buffer, max_stack_frames, num_frames);
330 }
331 index++;
332 }
333 fprintf(stderr, "*** stack_logging: no record found for 0x%x\n", address);
334 return 0;
335}
336
337kern_return_t stack_logging_enumerate_records(task_t task, memory_reader_t reader, vm_address_t address, void enumerator(stack_logging_record_t, void *), void *context) {
338 stack_logging_record_list_t *records;
339 kern_return_t err;
340 unsigned index;
341 unsigned disguised = STACK_LOGGING_DISGUISE(address);
342 if (!reader) reader = default_reader;
343 err = get_remote_records(task, reader, &records);
344 if (err || !records) return err;
345 // printf("stack_logging: %d records\n", records->num_records);
346 index = 0;
347 while (index < records->num_records) {
348 stack_logging_record_t *record = records->records + index;
349 if (!address || (record->address == disguised)) enumerator(*record, context);
350 index++;
351 }
352 return 0;
353}
354
/*
 * Expand a uniqued-stack identifier back into its sequence of PCs by
 * walking the parent chain in the (remote) uniquing table: each slot holds
 * the pair (pc, parent-slot) and the chain terminates at (unsigned)-1.
 * Writes up to max_stack_frames entries (hottest first) into
 * stack_frames_buffer and sets *num_frames.  A NULL reader means "read
 * the current task".  Out-of-range slot indices or all-zero (empty)
 * entries stop the walk with a diagnostic.
 */
kern_return_t stack_logging_frames_for_uniqued_stack(task_t task, memory_reader_t reader, unsigned uniqued_stack, vm_address_t *stack_frames_buffer, unsigned max_stack_frames, unsigned *num_frames) {
    stack_logging_record_list_t *records;
    unsigned *uniquing_table;
    kern_return_t err;
    if (!reader) reader = default_reader;
    *num_frames = 0;
    err = get_remote_records(task, reader, &records);
    if (err || !records) return err;
    // map the remote uniquing table into our address space
    err = reader(task, (vm_address_t)records->uniquing_table, records->uniquing_table_num_pages * vm_page_size, (void **)&uniquing_table);
    if (err) return err;
    while (max_stack_frames && (uniqued_stack != -1)) {
        unsigned thisPC;
        // bounds-check the slot index against the mapped table size
        if ((uniqued_stack * 2 + 1) * sizeof(unsigned) >= records->uniquing_table_num_pages * vm_page_size) {
            fprintf(stderr, "*** stack_logging: Invalid uniqued stack 0x%x", uniqued_stack);
            break;
        }
        thisPC = uniquing_table[uniqued_stack * 2];
        uniqued_stack = uniquing_table[uniqued_stack * 2 + 1];
        if (!thisPC && !uniqued_stack) {
            // Invalid entry (an empty slot; occupied slots never have both words zero)
            fprintf(stderr, "*** stack_logging: Invalid entry 0x%x", thisPC);
            break;
        }
        stack_frames_buffer[0] = thisPC;
        stack_frames_buffer++;
        (*num_frames)++;
        max_stack_frames--;
    }
    return 0;
}