2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 /* Bertrand from vmutils -> CF -> System */
26 #import "stack_logging.h"
31 #include <mach/vm_statistics.h>
32 #import <malloc/malloc.h>
35 extern void spin_lock(int *);
37 static inline void *allocate_pages(unsigned bytes
) {
39 if (vm_allocate(mach_task_self(), (vm_address_t
*)&address
, bytes
,
40 VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL
)| TRUE
)) {
41 malloc_printf("malloc[%d]: Out of memory while stack logging\n", getpid());
44 return (void *)address
;
47 static inline void deallocate_pages(void *ptr
, unsigned bytes
) {
48 vm_deallocate(mach_task_self(), (vm_address_t
)ptr
, bytes
);
51 static inline void copy_pages(const void *source
, void *dest
, unsigned bytes
) {
52 if (vm_copy(mach_task_self(), (vm_address_t
)source
, bytes
, (vm_address_t
)dest
)) memmove(dest
, source
, bytes
);
55 /*************** Recording stack ***********/
/*
 * Return the frame pointer of our caller's frame (the first interesting
 * frame for a backtrace).  Per-architecture: uses the compiler builtin
 * where it works, otherwise reads the frame/stack pointer register directly.
 */
static void *first_frame_address(void) {
#if defined(__i386__)
    return __builtin_frame_address(1);
#elif defined(__ppc__)
    void *addr;
#warning __builtin_frame_address IS BROKEN IN BEAKER: RADAR #2340421
    __asm__ volatile("mr %0, r1" : "=r" (addr));	// r1 is the PPC stack pointer
    return addr;
#else
#warning first_frame_address WILL NOT BE FUNCTIONAL ON THIS ARCHITECTURE
    return NULL;
#endif
}
71 static void *next_frame_address(void *addr
) {
73 #if defined(__MACH__) && defined(__i386__)
74 __asm__
volatile("movl (%1),%0" : "=r" (ret
) : "r" (addr
));
75 #elif defined(__MACH__) && defined(__ppc__)
76 __asm__
volatile("lwz %0,0x0(%1)" : "=r" (ret
) : "b" (addr
));
77 #elif defined(__hpux__)
78 __asm__
volatile("ldw 0x0(%1),%0" : "=r" (ret
) : "r" (addr
));
79 #elif defined(__svr4__)
80 __asm__
volatile("ta 0x3");
81 __asm__
volatile("ld [%1 + 56],%0" : "=r" (ret
) : "r" (addr
));
83 #error Unknown architecture
88 #if defined(__i386__) || defined (__m68k__)
89 #define FP_LINK_OFFSET 1
90 #elif defined(__ppc__)
91 #define FP_LINK_OFFSET 2
92 #elif defined(__hppa__)
93 #define FP_LINK_OFFSET -5
94 #elif defined(__sparc__)
95 #define FP_LINK_OFFSET 14
97 #error ********** Unimplemented architecture
100 void thread_stack_pcs(vm_address_t
*buffer
, unsigned max
, unsigned *nb
) {
102 addr
= first_frame_address();
104 while ((addr
>= (void *)0x800) && (max
--)) {
105 vm_address_t fp_link
= (vm_address_t
)(((unsigned *)addr
)+FP_LINK_OFFSET
);
107 buffer
[*nb
] = *((vm_address_t
*)fp_link
);
109 addr2
= next_frame_address(addr
);
111 if ((unsigned)addr2
<= (unsigned)addr
) break; // catch bozo frames
/*************** Uniquing stack ***********/

// Give up (and let the caller grow the table) after this many linear probes.
#define MAX_COLLIDE 8

// Maximum PCs captured per logged stack, including the thread marker slot.
#define MAX_NUM_PC 512
123 static int enter_pair_in_table(unsigned *table
, unsigned numPages
, unsigned *uniquedParent
, unsigned thisPC
) {
124 // uniquedParent is in-out; return 1 is collisions max not exceeded
125 unsigned base
= numPages
* vm_page_size
/ (sizeof(int)*2*2);
126 unsigned hash
= base
+ (((*uniquedParent
) << 4) ^ (thisPC
>> 2)) % (base
- 1); // modulo odd number for hashing
127 unsigned collisions
= MAX_COLLIDE
;
128 while (collisions
--) {
129 unsigned *head
= table
+ hash
*2;
130 if (! head
[0] && !head
[1]) {
131 /* end of chain; store this entry! */
132 /* Note that we need to test for both head[0] and head[1] as (0, -1) is a valid entry */
134 head
[1] = *uniquedParent
;
135 *uniquedParent
= hash
;
138 if ((head
[0] == thisPC
) && (head
[1] == *uniquedParent
)) {
139 /* we found the proper entry, the value for the pair is the entry offset */
140 *uniquedParent
= hash
;
144 if (hash
== base
*2) hash
= base
;
149 unsigned stack_logging_get_unique_stack(unsigned **table
, unsigned *table_num_pages
, unsigned *stack_entries
, unsigned count
, unsigned num_hot_to_skip
) {
150 unsigned uniquedParent
= (unsigned)-1;
151 // we skip the warmest entries that are an artefact of the code
152 while (num_hot_to_skip
--) {
153 if (count
> 0) { stack_entries
++; count
--; }
156 unsigned thisPC
= stack_entries
[count
];
157 while (!enter_pair_in_table(*table
, *table_num_pages
, &uniquedParent
, thisPC
)) {
159 unsigned oldBytes
= (*table_num_pages
) * vm_page_size
;
160 newTable
= allocate_pages(oldBytes
*2);
161 copy_pages(*table
, newTable
, oldBytes
);
162 deallocate_pages(*table
, oldBytes
);
163 *table_num_pages
*= 2;
167 return uniquedParent
;
170 /*************** Logging stack and arguments ***********/
// Global in-process record list; NULL until the first logged event.
// Analysis tools in other processes locate it by reading this variable's
// address remotely (see get_remote_records).
stack_logging_record_list_t *stack_logging_the_record_list = NULL;

// Master switch: when 0, stack_logging_log_stack() returns immediately.
int stack_logging_enable_logging = 0;

// When non-zero, adjacent alloc/dealloc pairs are kept instead of compacted.
int stack_logging_dontcompact = 0;
178 static stack_logging_record_list_t
*GrowLogRecords(stack_logging_record_list_t
*records
, unsigned desiredNumRecords
) {
179 stack_logging_record_list_t
*new_records
;
180 unsigned old_size
= records
->overall_num_bytes
;
181 if (desiredNumRecords
*sizeof(stack_logging_record_t
)+sizeof(stack_logging_record_list_t
) < records
->overall_num_bytes
) return records
;
182 records
->overall_num_bytes
+= records
->overall_num_bytes
+ vm_page_size
; // in order to always get an even number of pages
183 new_records
= allocate_pages(records
->overall_num_bytes
);
184 copy_pages(records
, new_records
, old_size
);
185 deallocate_pages(records
, old_size
);
189 static void prepare_to_log_stack(void) {
190 if (!stack_logging_the_record_list
) {
191 unsigned totalSize
= 4 * vm_page_size
;
192 stack_logging_the_record_list
= allocate_pages(totalSize
);
193 memset(stack_logging_the_record_list
, 0, sizeof(stack_logging_record_list_t
));
194 stack_logging_the_record_list
->overall_num_bytes
= totalSize
;
195 stack_logging_the_record_list
->uniquing_table_num_pages
= 128;
196 stack_logging_the_record_list
->uniquing_table
= allocate_pages(stack_logging_the_record_list
->uniquing_table_num_pages
* vm_page_size
);
200 void stack_logging_log_stack(unsigned type
, unsigned arg1
, unsigned arg2
, unsigned arg3
, unsigned result
, unsigned num_hot_to_skip
) {
201 stack_logging_record_t
*rec
;
202 if (!stack_logging_enable_logging
) return;
203 // printf("stack_logging_log_stack 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
204 if (type
& stack_logging_flag_zone
) {
205 // just process it now and be done with it!
206 arg1
= arg2
; arg2
= arg3
; arg3
= 0; type
&= ~stack_logging_flag_zone
;
208 if (type
& stack_logging_flag_calloc
) {
209 // just process it now and be done with it!
210 arg1
*= arg2
; arg2
= arg3
; arg3
= 0; type
&= ~stack_logging_flag_calloc
;
212 if (type
& stack_logging_flag_object
) {
213 unsigned *class = (unsigned *)arg1
;
214 arg1
= arg2
+ class[5]; // corresponds to the instance_size field
215 arg2
= 0; arg3
= 0; type
= stack_logging_type_alloc
;
217 if (type
& stack_logging_flag_cleared
) {
218 type
&= ~stack_logging_flag_cleared
;
220 if (type
& stack_logging_flag_handle
) {
221 if (stack_logging_type_alloc
) {
223 stack_logging_log_stack(stack_logging_type_alloc
, 0, 0, 0, result
, num_hot_to_skip
+1);
224 stack_logging_log_stack(stack_logging_type_alloc
, arg1
, 0, 0, *((int *)result
), num_hot_to_skip
+1);
227 if (stack_logging_type_dealloc
) {
229 stack_logging_log_stack(stack_logging_type_dealloc
, *((int *)arg1
), 0, 0, 0, num_hot_to_skip
+1);
230 stack_logging_log_stack(stack_logging_type_dealloc
, arg1
, 0, 0, 0, num_hot_to_skip
+1);
233 printf("*** Unknown logging type: 0x%x\n", type
);
235 if (type
== stack_logging_flag_set_handle_size
) {
237 // Thanks to a horrible hack, arg3 contains the prvious handle value
238 if (arg3
== *((int *)arg1
)) return;
239 stack_logging_log_stack(stack_logging_type_dealloc
, arg3
, 0, 0, 0, num_hot_to_skip
+1);
240 stack_logging_log_stack(stack_logging_type_alloc
, arg2
, 0, 0, *((int *)arg1
), num_hot_to_skip
+1);
243 if (type
== (stack_logging_type_dealloc
|stack_logging_type_alloc
)) {
244 if (arg1
== result
) return; // realloc had no effect, skipping
246 // realloc(NULL, size) same as malloc(size)
247 type
= stack_logging_type_alloc
; arg1
= arg2
; arg2
= arg3
; arg3
= 0;
249 // realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
250 stack_logging_log_stack(stack_logging_type_dealloc
, arg1
, 0, 0, 0, num_hot_to_skip
+1);
251 stack_logging_log_stack(stack_logging_type_alloc
, arg2
, 0, 0, result
, num_hot_to_skip
+1);
255 if (type
== stack_logging_type_dealloc
) {
257 if (!arg1
) return; // free(nil)
259 prepare_to_log_stack();
260 spin_lock(&stack_logging_the_record_list
->lock
);
261 stack_logging_enable_logging
= 0;
262 stack_logging_the_record_list
= GrowLogRecords(stack_logging_the_record_list
, stack_logging_the_record_list
->num_records
+ 1);
263 rec
= stack_logging_the_record_list
->records
+ stack_logging_the_record_list
->num_records
;
264 // We take care of the common case of alloc-dealloc
265 if (!stack_logging_dontcompact
&& stack_logging_the_record_list
->num_records
&& (type
== stack_logging_type_dealloc
) && arg1
&& ((rec
-1)->type
== stack_logging_type_alloc
) && (arg1
== STACK_LOGGING_DISGUISE((rec
-1)->address
))) {
266 stack_logging_the_record_list
->num_records
--;
267 // printf("Erased previous record in alloc-dealloc sequence\n");
269 unsigned stack_entries
[MAX_NUM_PC
];
272 if (type
== stack_logging_type_dealloc
) {
274 rec
->address
= STACK_LOGGING_DISGUISE(arg1
); // we disguise the address
275 } else if (type
== stack_logging_type_alloc
) {
276 rec
->argument
= arg1
;
277 rec
->address
= STACK_LOGGING_DISGUISE(result
); // we disguise the address
279 rec
->argument
= arg2
;
280 rec
->address
= STACK_LOGGING_DISGUISE(arg1
); // we disguise the address
282 // printf("Before getting samples 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
283 thread_stack_pcs(stack_entries
, MAX_NUM_PC
- 1, &count
);
284 // We put at the bottom of the stack a marker that denotes the thread (+1 for good measure...)
285 stack_entries
[count
++] = (int)pthread_self() + 1;
286 /* now let's unique the sample */
287 // printf("Uniquing 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
288 rec
->uniqued_stack
= stack_logging_get_unique_stack(&stack_logging_the_record_list
->uniquing_table
, &stack_logging_the_record_list
->uniquing_table_num_pages
, stack_entries
, count
, num_hot_to_skip
+2); // we additionally skip the warmest 2 entries that are an artefact of the code
289 stack_logging_the_record_list
->num_records
++;
291 stack_logging_enable_logging
= 1;
292 stack_logging_the_record_list
->lock
= 0;
295 static kern_return_t
default_reader(task_t task
, vm_address_t address
, vm_size_t size
, void **ptr
) {
296 *ptr
= (void *)address
;
300 static kern_return_t
get_remote_records(task_t task
, memory_reader_t reader
, stack_logging_record_list_t
**records
) {
302 vm_address_t
*remote_records_address_ref
;
305 err
= reader(task
, (vm_address_t
)&stack_logging_the_record_list
, sizeof(vm_address_t
), (void **)&remote_records_address_ref
);
307 if (!*remote_records_address_ref
) {
308 // printf("stack_logging: no stack record\n");
311 // printf("stack_logging: stack records at %p\n", (void *)(*remote_records_address_ref));
312 // printf("stack_logging: reading %d bytes\n", sizeof(stack_logging_record_list_t));
313 err
= reader(task
, *remote_records_address_ref
, sizeof(stack_logging_record_list_t
), (void **)records
); // get the list head
315 // printf("stack_logging: overall num bytes = %d\n", records->overall_num_bytes);
316 return reader(task
, *remote_records_address_ref
, (*records
)->overall_num_bytes
, (void **)records
);
319 kern_return_t
stack_logging_get_frames(task_t task
, memory_reader_t reader
, vm_address_t address
, vm_address_t
*stack_frames_buffer
, unsigned max_stack_frames
, unsigned *num_frames
) {
320 stack_logging_record_list_t
*records
;
323 unsigned disguised
= STACK_LOGGING_DISGUISE(address
);
324 if (!reader
) reader
= default_reader
;
326 err
= get_remote_records(task
, reader
, &records
);
327 if (err
|| !records
) return err
;
328 // printf("stack_logging: %d records\n", records->num_records);
330 while (index
< records
->num_records
) {
331 stack_logging_record_t
*record
= records
->records
+ index
;
332 if (record
->address
== disguised
) {
333 return stack_logging_frames_for_uniqued_stack(task
, reader
, record
->uniqued_stack
, stack_frames_buffer
, max_stack_frames
, num_frames
);
337 fprintf(stderr
, "*** stack_logging: no record found for 0x%x\n", address
);
341 kern_return_t
stack_logging_enumerate_records(task_t task
, memory_reader_t reader
, vm_address_t address
, void enumerator(stack_logging_record_t
, void *), void *context
) {
342 stack_logging_record_list_t
*records
;
345 unsigned disguised
= STACK_LOGGING_DISGUISE(address
);
346 if (!reader
) reader
= default_reader
;
347 err
= get_remote_records(task
, reader
, &records
);
348 if (err
|| !records
) return err
;
349 // printf("stack_logging: %d records\n", records->num_records);
351 while (index
< records
->num_records
) {
352 stack_logging_record_t
*record
= records
->records
+ index
;
353 if (!address
|| (record
->address
== disguised
)) enumerator(*record
, context
);
359 kern_return_t
stack_logging_frames_for_uniqued_stack(task_t task
, memory_reader_t reader
, unsigned uniqued_stack
, vm_address_t
*stack_frames_buffer
, unsigned max_stack_frames
, unsigned *num_frames
) {
360 stack_logging_record_list_t
*records
;
361 unsigned *uniquing_table
;
363 if (!reader
) reader
= default_reader
;
365 err
= get_remote_records(task
, reader
, &records
);
366 if (err
|| !records
) return err
;
367 err
= reader(task
, (vm_address_t
)records
->uniquing_table
, records
->uniquing_table_num_pages
* vm_page_size
, (void **)&uniquing_table
);
369 while (max_stack_frames
&& (uniqued_stack
!= -1)) {
371 if ((uniqued_stack
* 2 + 1) * sizeof(unsigned) >= records
->uniquing_table_num_pages
* vm_page_size
) {
372 fprintf(stderr
, "*** stack_logging: Invalid uniqued stack 0x%x", uniqued_stack
);
375 thisPC
= uniquing_table
[uniqued_stack
* 2];
376 uniqued_stack
= uniquing_table
[uniqued_stack
* 2 + 1];
377 if (!thisPC
&& !uniqued_stack
) {
379 fprintf(stderr
, "*** stack_logging: Invalid entry 0x%x", thisPC
);
382 stack_frames_buffer
[0] = thisPC
;
383 stack_frames_buffer
++;