/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/* Bertrand from vmutils -> CF -> System */

#import "stack_logging.h"

#import <libc.h>
#import <pthread.h>
#import <mach/mach.h>
#include <mach/vm_statistics.h>
#import <malloc/malloc.h>
#import <stdlib.h>

extern void spin_lock(int *);

static inline void *allocate_pages(unsigned bytes) {
    void *address;
    if (vm_allocate(mach_task_self(), (vm_address_t *)&address, bytes,
            VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL) | TRUE)) {
        malloc_printf("malloc[%d]: Out of memory while stack logging\n", getpid());
        abort();
    }
    return (void *)address;
}

static inline void deallocate_pages(void *ptr, unsigned bytes) {
    vm_deallocate(mach_task_self(), (vm_address_t)ptr, bytes);
}

static inline void copy_pages(const void *source, void *dest, unsigned bytes) {
    if (vm_copy(mach_task_self(), (vm_address_t)source, bytes, (vm_address_t)dest)) memmove(dest, source, bytes);
}

/*************** Recording stack ***********/

static void *first_frame_address(void) {
#if 0
    return __builtin_frame_address(1);
#elif defined(__ppc__)
    void *addr;
#warning __builtin_frame_address IS BROKEN IN BEAKER: RADAR #2340421
    __asm__ volatile("mr %0, r1" : "=r" (addr));
    return addr;
#else
#warning first_frame_address WILL NOT BE FUNCTIONAL ON THIS ARCHITECTURE
    return NULL;
#endif
}

static void *next_frame_address(void *addr) {
    void *ret;
#if defined(__MACH__) && defined(__i386__)
    __asm__ volatile("movl (%1),%0" : "=r" (ret) : "r" (addr));
#elif defined(__MACH__) && defined(__ppc__)
    __asm__ volatile("lwz %0,0x0(%1)" : "=r" (ret) : "b" (addr));
#elif defined(__hpux__)
    __asm__ volatile("ldw 0x0(%1),%0" : "=r" (ret) : "r" (addr));
#elif defined(__svr4__)
    __asm__ volatile("ta 0x3");
    __asm__ volatile("ld [%1 + 56],%0" : "=r" (ret) : "r" (addr));
#else
#error Unknown architecture
#endif
    return ret;
}

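/* Offset, in words, from a frame pointer to the slot that holds the caller's
   return address (e.g. the word at fp+4 on i386, or at sp+8 on ppc). */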
#if defined(__i386__) || defined(__m68k__)
#define FP_LINK_OFFSET 1
#elif defined(__ppc__)
#define FP_LINK_OFFSET 2
#elif defined(__hppa__)
#define FP_LINK_OFFSET -5
#elif defined(__sparc__)
#define FP_LINK_OFFSET 14
#else
#error ********** Unimplemented architecture
#endif

void thread_stack_pcs(vm_address_t *buffer, unsigned max, unsigned *nb) {
    void *addr;
    addr = first_frame_address();
    *nb = 0;
    while ((addr >= (void *)0x800) && (max--)) {
        vm_address_t fp_link = (vm_address_t)(((unsigned *)addr) + FP_LINK_OFFSET);
        void *addr2;
        buffer[*nb] = *((vm_address_t *)fp_link);
        (*nb)++;
        addr2 = next_frame_address(addr);
#if defined(__ppc__)
        if ((unsigned)addr2 <= (unsigned)addr) break; // catch bozo frames
#endif
        addr = addr2;
    }
}

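/*
 * Illustrative usage sketch (not from the original file): how a caller might
 * capture its own backtrace with thread_stack_pcs(). The buffer size of 64
 * and the helper name dump_my_stack are arbitrary choices for the example.
 *
 *     #include <stdio.h>
 *
 *     static void dump_my_stack(void) {
 *         vm_address_t pcs[64];
 *         unsigned depth = 0;
 *         unsigned i;
 *         thread_stack_pcs(pcs, 64, &depth);  // fills pcs[], hottest frame first
 *         for (i = 0; i < depth; i++)
 *             printf("frame %u: 0x%lx\n", i, (unsigned long)pcs[i]);
 *     }
 */
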
/*************** Uniquing stack ***********/

#define MAX_COLLIDE 8

#define MAX_NUM_PC 512

static int enter_pair_in_table(unsigned *table, unsigned numPages, unsigned *uniquedParent, unsigned thisPC) {
    // uniquedParent is in-out; returns 1 if the collision maximum was not exceeded
    unsigned base = numPages * vm_page_size / (sizeof(int)*2*2);
    unsigned hash = base + (((*uniquedParent) << 4) ^ (thisPC >> 2)) % (base - 1); // modulo odd number for hashing
    unsigned collisions = MAX_COLLIDE;
    while (collisions--) {
        unsigned *head = table + hash*2;
        if (!head[0] && !head[1]) {
            /* end of chain; store this entry! */
            /* Note that we need to test for both head[0] and head[1] as (0, -1) is a valid entry */
            head[0] = thisPC;
            head[1] = *uniquedParent;
            *uniquedParent = hash;
            return 1;
        }
        if ((head[0] == thisPC) && (head[1] == *uniquedParent)) {
            /* we found the proper entry, the value for the pair is the entry offset */
            *uniquedParent = hash;
            return 1;
        }
        hash++;
        if (hash == base*2) hash = base;
    }
    return 0;
}

unsigned stack_logging_get_unique_stack(unsigned **table, unsigned *table_num_pages, unsigned *stack_entries, unsigned count, unsigned num_hot_to_skip) {
    unsigned uniquedParent = (unsigned)-1;
    // we skip the warmest entries that are an artefact of the code
    while (num_hot_to_skip--) {
        if (count > 0) { stack_entries++; count--; }
    }
    while (count--) {
        unsigned thisPC = stack_entries[count];
        while (!enter_pair_in_table(*table, *table_num_pages, &uniquedParent, thisPC)) {
            unsigned *newTable;
            unsigned oldBytes = (*table_num_pages) * vm_page_size;
            newTable = allocate_pages(oldBytes*2);
            copy_pages(*table, newTable, oldBytes);
            deallocate_pages(*table, oldBytes);
            *table_num_pages *= 2;
            *table = newTable;
        }
    }
    return uniquedParent;
}

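/*
 * Illustrative sketch (not from the original file): the uniquing table is an
 * open-addressed hash table of (PC, parent) pairs, so an entire call stack is
 * represented by the single slot index of its hottest frame. Walking the
 * parent links back to (unsigned)-1 reconstructs the stack; the hypothetical
 * helper below mirrors what stack_logging_frames_for_uniqued_stack() does for
 * a locally mapped table.
 *
 *     static unsigned rebuild_stack(unsigned *table, unsigned uniqued,
 *                                   vm_address_t *out, unsigned max) {
 *         unsigned n = 0;
 *         while (max-- && (uniqued != (unsigned)-1)) {
 *             out[n++] = table[uniqued * 2];      // the PC stored in this slot
 *             uniqued = table[uniqued * 2 + 1];   // follow the parent link
 *         }
 *         return n;
 *     }
 */
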
/*************** Logging stack and arguments ***********/

stack_logging_record_list_t *stack_logging_the_record_list = NULL;

int stack_logging_enable_logging = 0;

int stack_logging_dontcompact = 0;

static stack_logging_record_list_t *GrowLogRecords(stack_logging_record_list_t *records, unsigned desiredNumRecords) {
    stack_logging_record_list_t *new_records;
    unsigned old_size = records->overall_num_bytes;
    if (desiredNumRecords*sizeof(stack_logging_record_t)+sizeof(stack_logging_record_list_t) < records->overall_num_bytes) return records;
    records->overall_num_bytes += records->overall_num_bytes + vm_page_size; // in order to always get an even number of pages
    new_records = allocate_pages(records->overall_num_bytes);
    copy_pages(records, new_records, old_size);
    deallocate_pages(records, old_size);
    return new_records;
}

static void prepare_to_log_stack(void) {
    if (!stack_logging_the_record_list) {
        unsigned totalSize = 4 * vm_page_size;
        stack_logging_the_record_list = allocate_pages(totalSize);
        memset(stack_logging_the_record_list, 0, sizeof(stack_logging_record_list_t));
        stack_logging_the_record_list->overall_num_bytes = totalSize;
        stack_logging_the_record_list->uniquing_table_num_pages = 128;
        stack_logging_the_record_list->uniquing_table = allocate_pages(stack_logging_the_record_list->uniquing_table_num_pages * vm_page_size);
    }
}

void stack_logging_log_stack(unsigned type, unsigned arg1, unsigned arg2, unsigned arg3, unsigned result, unsigned num_hot_to_skip) {
    stack_logging_record_t *rec;
    if (!stack_logging_enable_logging) return;
    // printf("stack_logging_log_stack 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
    if (type & stack_logging_flag_zone) {
        // just process it now and be done with it!
        arg1 = arg2; arg2 = arg3; arg3 = 0; type &= ~stack_logging_flag_zone;
    }
    if (type & stack_logging_flag_calloc) {
        // just process it now and be done with it!
        arg1 *= arg2; arg2 = arg3; arg3 = 0; type &= ~stack_logging_flag_calloc;
    }
    if (type & stack_logging_flag_object) {
        unsigned *class = (unsigned *)arg1;
        arg1 = arg2 + class[5]; // corresponds to the instance_size field
        arg2 = 0; arg3 = 0; type = stack_logging_type_alloc;
    }
    if (type & stack_logging_flag_cleared) {
        type &= ~stack_logging_flag_cleared;
    }
    if (type & stack_logging_flag_handle) {
        if (type & stack_logging_type_alloc) {
            if (!result) return;
            stack_logging_log_stack(stack_logging_type_alloc, 0, 0, 0, result, num_hot_to_skip+1);
            stack_logging_log_stack(stack_logging_type_alloc, arg1, 0, 0, *((int *)result), num_hot_to_skip+1);
            return;
        }
        if (type & stack_logging_type_dealloc) {
            if (!arg1) return;
            stack_logging_log_stack(stack_logging_type_dealloc, *((int *)arg1), 0, 0, 0, num_hot_to_skip+1);
            stack_logging_log_stack(stack_logging_type_dealloc, arg1, 0, 0, 0, num_hot_to_skip+1);
            return;
        }
        printf("*** Unknown logging type: 0x%x\n", type);
    }
    if (type == stack_logging_flag_set_handle_size) {
        if (!arg1) return;
        // Thanks to a horrible hack, arg3 contains the previous handle value
        if (arg3 == *((int *)arg1)) return;
        stack_logging_log_stack(stack_logging_type_dealloc, arg3, 0, 0, 0, num_hot_to_skip+1);
        stack_logging_log_stack(stack_logging_type_alloc, arg2, 0, 0, *((int *)arg1), num_hot_to_skip+1);
        return;
    }
    if (type == (stack_logging_type_dealloc|stack_logging_type_alloc)) {
        if (arg1 == result) return; // realloc had no effect, skipping
        if (!arg1) {
            // realloc(NULL, size) same as malloc(size)
            type = stack_logging_type_alloc; arg1 = arg2; arg2 = arg3; arg3 = 0;
        } else {
            // realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
            stack_logging_log_stack(stack_logging_type_dealloc, arg1, 0, 0, 0, num_hot_to_skip+1);
            stack_logging_log_stack(stack_logging_type_alloc, arg2, 0, 0, result, num_hot_to_skip+1);
            return;
        }
    }
    if (type == stack_logging_type_dealloc) {
        // simple free
        if (!arg1) return; // free(nil)
    }
    prepare_to_log_stack();
    spin_lock(&stack_logging_the_record_list->lock);
    stack_logging_enable_logging = 0;
    stack_logging_the_record_list = GrowLogRecords(stack_logging_the_record_list, stack_logging_the_record_list->num_records + 1);
    rec = stack_logging_the_record_list->records + stack_logging_the_record_list->num_records;
    // We take care of the common case of alloc-dealloc
    if (!stack_logging_dontcompact && stack_logging_the_record_list->num_records && (type == stack_logging_type_dealloc) && arg1 && ((rec-1)->type == stack_logging_type_alloc) && (arg1 == STACK_LOGGING_DISGUISE((rec-1)->address))) {
        stack_logging_the_record_list->num_records--;
        // printf("Erased previous record in alloc-dealloc sequence\n");
    } else {
        unsigned stack_entries[MAX_NUM_PC];
        unsigned count = 0;
        rec->type = type;
        if (type == stack_logging_type_dealloc) {
            rec->argument = 0;
            rec->address = STACK_LOGGING_DISGUISE(arg1); // we disguise the address
        } else if (type == stack_logging_type_alloc) {
            rec->argument = arg1;
            rec->address = STACK_LOGGING_DISGUISE(result); // we disguise the address
        } else {
            rec->argument = arg2;
            rec->address = STACK_LOGGING_DISGUISE(arg1); // we disguise the address
        }
        // printf("Before getting samples 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
        thread_stack_pcs(stack_entries, MAX_NUM_PC - 1, &count);
        // We put at the bottom of the stack a marker that denotes the thread (+1 for good measure...)
        stack_entries[count++] = (int)pthread_self() + 1;
        /* now let's unique the sample */
        // printf("Uniquing 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
        rec->uniqued_stack = stack_logging_get_unique_stack(&stack_logging_the_record_list->uniquing_table, &stack_logging_the_record_list->uniquing_table_num_pages, stack_entries, count, num_hot_to_skip+2); // we additionally skip the warmest 2 entries that are an artefact of the code
        stack_logging_the_record_list->num_records++;
    }
    stack_logging_enable_logging = 1;
    stack_logging_the_record_list->lock = 0;
}

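/*
 * Illustrative sketch (not from the original file): how a client might record
 * a region of interest, assuming the allocator consults
 * stack_logging_enable_logging around its entry points (this is what the
 * MallocStackLogging environment variable normally turns on at startup).
 * Note that with stack_logging_dontcompact left at 0, an allocation
 * immediately followed by its free is compacted away and leaves no record.
 *
 *     stack_logging_enable_logging = 1;   // start recording
 *     void *p = malloc(128);              // recorded as stack_logging_type_alloc
 *     use(p);                             // hypothetical work between the two events
 *     free(p);                            // recorded as stack_logging_type_dealloc
 *     stack_logging_enable_logging = 0;   // stop recording
 */
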
static kern_return_t default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
    *ptr = (void *)address;
    return 0;
}

static kern_return_t get_remote_records(task_t task, memory_reader_t reader, stack_logging_record_list_t **records) {
    // sets records
    vm_address_t *remote_records_address_ref;
    kern_return_t err;
    *records = NULL;
    err = reader(task, (vm_address_t)&stack_logging_the_record_list, sizeof(vm_address_t), (void **)&remote_records_address_ref);
    if (err) return err;
    if (!*remote_records_address_ref) {
        // printf("stack_logging: no stack record\n");
        return 0;
    }
    // printf("stack_logging: stack records at %p\n", (void *)(*remote_records_address_ref));
    // printf("stack_logging: reading %d bytes\n", sizeof(stack_logging_record_list_t));
    err = reader(task, *remote_records_address_ref, sizeof(stack_logging_record_list_t), (void **)records); // get the list head
    if (err) return err;
    // printf("stack_logging: overall num bytes = %d\n", records->overall_num_bytes);
    return reader(task, *remote_records_address_ref, (*records)->overall_num_bytes, (void **)records);
}

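/*
 * Illustrative sketch (not from the original file): a memory_reader_t suitable
 * for inspecting another task, built on vm_read(). Unlike default_reader(),
 * which only works within the current task, this maps the remote pages into
 * the caller's address space. For brevity the sketch never releases the
 * mappings; a real tool would vm_deallocate() them when done.
 *
 *     static kern_return_t remote_reader(task_t remote_task, vm_address_t remote_address,
 *                                        vm_size_t size, void **local_memory) {
 *         vm_offset_t data;
 *         mach_msg_type_number_t data_count;
 *         kern_return_t err = vm_read(remote_task, remote_address, size, &data, &data_count);
 *         if (err) return err;
 *         *local_memory = (void *)data;
 *         return KERN_SUCCESS;
 *     }
 */
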
kern_return_t stack_logging_get_frames(task_t task, memory_reader_t reader, vm_address_t address, vm_address_t *stack_frames_buffer, unsigned max_stack_frames, unsigned *num_frames) {
    stack_logging_record_list_t *records;
    kern_return_t err;
    unsigned index;
    unsigned disguised = STACK_LOGGING_DISGUISE(address);
    if (!reader) reader = default_reader;
    *num_frames = 0;
    err = get_remote_records(task, reader, &records);
    if (err || !records) return err;
    // printf("stack_logging: %d records\n", records->num_records);
    index = 0;
    while (index < records->num_records) {
        stack_logging_record_t *record = records->records + index;
        if (record->address == disguised) {
            return stack_logging_frames_for_uniqued_stack(task, reader, record->uniqued_stack, stack_frames_buffer, max_stack_frames, num_frames);
        }
        index++;
    }
    fprintf(stderr, "*** stack_logging: no record found for 0x%x\n", address);
    return 0;
}

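/*
 * Illustrative usage sketch (not from the original file): looking up the
 * backtrace recorded for a block in the current task. Passing a NULL reader
 * selects default_reader(); some_malloced_pointer is a placeholder for a
 * block whose allocation was logged. MAX_NUM_PC bounds the buffer since no
 * recorded stack is deeper than that.
 *
 *     vm_address_t frames[MAX_NUM_PC];
 *     unsigned num_frames = 0;
 *     kern_return_t err = stack_logging_get_frames(mach_task_self(), NULL,
 *             (vm_address_t)some_malloced_pointer, frames, MAX_NUM_PC, &num_frames);
 *     // on success, frames[0..num_frames-1] holds the saved return addresses,
 *     // hottest frame first, ending with the thread marker pushed at log time
 */
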
kern_return_t stack_logging_enumerate_records(task_t task, memory_reader_t reader, vm_address_t address, void enumerator(stack_logging_record_t, void *), void *context) {
    stack_logging_record_list_t *records;
    kern_return_t err;
    unsigned index;
    unsigned disguised = STACK_LOGGING_DISGUISE(address);
    if (!reader) reader = default_reader;
    err = get_remote_records(task, reader, &records);
    if (err || !records) return err;
    // printf("stack_logging: %d records\n", records->num_records);
    index = 0;
    while (index < records->num_records) {
        stack_logging_record_t *record = records->records + index;
        if (!address || (record->address == disguised)) enumerator(*record, context);
        index++;
    }
    return 0;
}

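/*
 * Illustrative usage sketch (not from the original file): counting recorded
 * allocation events in the current task with a hypothetical enumerator
 * callback. Passing address == 0 visits every record rather than only those
 * matching one block.
 *
 *     static void count_allocs(stack_logging_record_t record, void *context) {
 *         if (record.type == stack_logging_type_alloc) (*(unsigned *)context)++;
 *     }
 *
 *     unsigned num_allocs = 0;
 *     stack_logging_enumerate_records(mach_task_self(), NULL, 0, count_allocs, &num_allocs);
 */
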
kern_return_t stack_logging_frames_for_uniqued_stack(task_t task, memory_reader_t reader, unsigned uniqued_stack, vm_address_t *stack_frames_buffer, unsigned max_stack_frames, unsigned *num_frames) {
    stack_logging_record_list_t *records;
    unsigned *uniquing_table;
    kern_return_t err;
    if (!reader) reader = default_reader;
    *num_frames = 0;
    err = get_remote_records(task, reader, &records);
    if (err || !records) return err;
    err = reader(task, (vm_address_t)records->uniquing_table, records->uniquing_table_num_pages * vm_page_size, (void **)&uniquing_table);
    if (err) return err;
    while (max_stack_frames && (uniqued_stack != -1)) {
        unsigned thisPC;
        if ((uniqued_stack * 2 + 1) * sizeof(unsigned) >= records->uniquing_table_num_pages * vm_page_size) {
            fprintf(stderr, "*** stack_logging: Invalid uniqued stack 0x%x", uniqued_stack);
            break;
        }
        thisPC = uniquing_table[uniqued_stack * 2];
        uniqued_stack = uniquing_table[uniqued_stack * 2 + 1];
        if (!thisPC && !uniqued_stack) {
            // Invalid entry
            fprintf(stderr, "*** stack_logging: Invalid entry 0x%x", thisPC);
            break;
        }
        stack_frames_buffer[0] = thisPC;
        stack_frames_buffer++;
        (*num_frames)++;
        max_stack_frames--;
    }
    return 0;
}