/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Bertrand from vmutils -> CF -> System */
#import "stack_logging.h"

#include <libc.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <mach/mach.h>
#include <mach/vm_statistics.h>
#import <malloc/malloc.h>
35 extern void spin_lock(int *);
37 static inline void *allocate_pages(unsigned) __attribute__((always_inline
));
38 static inline void *allocate_pages(unsigned bytes
) {
40 if (vm_allocate(mach_task_self(), (vm_address_t
*)&address
, bytes
,
41 VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL
)| TRUE
)) {
42 malloc_printf("*** out of memory while stack logging\n");
45 return (void *)address
;
48 static inline void deallocate_pages(void *, unsigned) __attribute__((always_inline
));
49 static inline void deallocate_pages(void *ptr
, unsigned bytes
) {
50 vm_deallocate(mach_task_self(), (vm_address_t
)ptr
, bytes
);
53 static inline void copy_pages(const void *, void *, unsigned) __attribute__((always_inline
));
54 static inline void copy_pages(const void *source
, void *dest
, unsigned bytes
) {
55 if (vm_copy(mach_task_self(), (vm_address_t
)source
, bytes
, (vm_address_t
)dest
)) memmove(dest
, source
, bytes
);
/***************	Recording stack		***********/
60 static void *first_frame_address(void) {
62 return __builtin_frame_address(1);
63 #elif defined(__ppc__) || defined(__ppc64__)
65 #warning __builtin_frame_address IS BROKEN IN BEAKER: RADAR #2340421
66 __asm__
volatile("mr %0, r1" : "=r" (addr
));
69 #warning first_frame_address WILL NOT BE FUNCTIONAL ON THIS ARCHITECTURE
74 static void *next_frame_address(void *addr
) {
76 #if defined(__MACH__) && defined(__i386__)
77 __asm__
volatile("movl (%1),%0" : "=r" (ret
) : "r" (addr
));
78 #elif defined(__MACH__) && (defined(__ppc__) || defined(__ppc64__))
79 __asm__
volatile("lwz %0,0x0(%1)" : "=r" (ret
) : "b" (addr
));
80 #elif defined(__hpux__)
81 __asm__
volatile("ldw 0x0(%1),%0" : "=r" (ret
) : "r" (addr
));
82 #elif defined(__svr4__)
83 __asm__
volatile("ta 0x3");
84 __asm__
volatile("ld [%1 + 56],%0" : "=r" (ret
) : "r" (addr
));
86 #error Unknown architecture
91 #if defined(__i386__) || defined (__m68k__)
92 #define FP_LINK_OFFSET 1
93 #elif defined(__ppc__) || defined(__ppc64__)
94 #define FP_LINK_OFFSET 2
95 #elif defined(__hppa__)
96 #define FP_LINK_OFFSET -5
97 #elif defined(__sparc__)
98 #define FP_LINK_OFFSET 14
100 #error ********** Unimplemented architecture
103 void thread_stack_pcs(vm_address_t
*buffer
, unsigned max
, unsigned *nb
) {
105 addr
= first_frame_address();
107 while ((addr
>= (void *)0x800) && (max
--)) {
108 vm_address_t fp_link
= (vm_address_t
)(((unsigned *)addr
)+FP_LINK_OFFSET
);
110 buffer
[*nb
] = *((vm_address_t
*)fp_link
);
112 addr2
= next_frame_address(addr
);
113 #if defined(__ppc__) || defined(__ppc64__)
114 if ((unsigned)addr2
<= (unsigned)addr
) break; // catch bozo frames
120 /*************** Uniquing stack ***********/
122 #define MAX_COLLIDE 8
124 #define MAX_NUM_PC 512
126 static int enter_pair_in_table(unsigned *table
, unsigned numPages
, unsigned *uniquedParent
, unsigned thisPC
) {
127 // uniquedParent is in-out; return 1 is collisions max not exceeded
128 unsigned base
= numPages
* vm_page_size
/ (sizeof(int)*2*2);
129 unsigned hash
= base
+ (((*uniquedParent
) << 4) ^ (thisPC
>> 2)) % (base
- 1); // modulo odd number for hashing
130 unsigned collisions
= MAX_COLLIDE
;
131 while (collisions
--) {
132 unsigned *head
= table
+ hash
*2;
133 if (! head
[0] && !head
[1]) {
134 /* end of chain; store this entry! */
135 /* Note that we need to test for both head[0] and head[1] as (0, -1) is a valid entry */
137 head
[1] = *uniquedParent
;
138 *uniquedParent
= hash
;
141 if ((head
[0] == thisPC
) && (head
[1] == *uniquedParent
)) {
142 /* we found the proper entry, the value for the pair is the entry offset */
143 *uniquedParent
= hash
;
147 if (hash
== base
*2) hash
= base
;
152 unsigned stack_logging_get_unique_stack(unsigned **table
, unsigned *table_num_pages
, unsigned *stack_entries
, unsigned count
, unsigned num_hot_to_skip
) {
153 unsigned uniquedParent
= (unsigned)-1;
154 // we skip the warmest entries that are an artefact of the code
155 while (num_hot_to_skip
--) {
156 if (count
> 0) { stack_entries
++; count
--; }
159 unsigned thisPC
= stack_entries
[count
];
160 while (!enter_pair_in_table(*table
, *table_num_pages
, &uniquedParent
, thisPC
)) {
162 unsigned oldBytes
= (*table_num_pages
) * vm_page_size
;
163 newTable
= allocate_pages(oldBytes
*2);
164 copy_pages(*table
, newTable
, oldBytes
);
165 deallocate_pages(*table
, oldBytes
);
166 *table_num_pages
*= 2;
170 return uniquedParent
;
173 /*************** Logging stack and arguments ***********/
175 stack_logging_record_list_t
*stack_logging_the_record_list
= NULL
;
177 int stack_logging_enable_logging
= 0;
179 int stack_logging_dontcompact
= 0;
181 static stack_logging_record_list_t
*GrowLogRecords(stack_logging_record_list_t
*records
, unsigned desiredNumRecords
) {
182 stack_logging_record_list_t
*new_records
;
183 unsigned old_size
= records
->overall_num_bytes
;
184 if (desiredNumRecords
*sizeof(stack_logging_record_t
)+sizeof(stack_logging_record_list_t
) < records
->overall_num_bytes
) return records
;
185 records
->overall_num_bytes
+= records
->overall_num_bytes
+ vm_page_size
; // in order to always get an even number of pages
186 new_records
= allocate_pages(records
->overall_num_bytes
);
187 copy_pages(records
, new_records
, old_size
);
188 deallocate_pages(records
, old_size
);
192 static void prepare_to_log_stack(void) {
193 if (!stack_logging_the_record_list
) {
194 unsigned totalSize
= 4 * vm_page_size
;
195 stack_logging_the_record_list
= allocate_pages(totalSize
);
196 memset(stack_logging_the_record_list
, 0, sizeof(stack_logging_record_list_t
));
197 stack_logging_the_record_list
->overall_num_bytes
= totalSize
;
198 stack_logging_the_record_list
->uniquing_table_num_pages
= 128;
199 stack_logging_the_record_list
->uniquing_table
= allocate_pages(stack_logging_the_record_list
->uniquing_table_num_pages
* vm_page_size
);
203 void stack_logging_log_stack(unsigned type
, unsigned arg1
, unsigned arg2
, unsigned arg3
, unsigned result
, unsigned num_hot_to_skip
) {
204 stack_logging_record_t
*rec
;
205 if (!stack_logging_enable_logging
) return;
206 // printf("stack_logging_log_stack 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
207 if (type
& stack_logging_flag_zone
) {
208 // just process it now and be done with it!
209 arg1
= arg2
; arg2
= arg3
; arg3
= 0; type
&= ~stack_logging_flag_zone
;
211 if (type
& stack_logging_flag_calloc
) {
212 // just process it now and be done with it!
213 arg1
*= arg2
; arg2
= arg3
; arg3
= 0; type
&= ~stack_logging_flag_calloc
;
215 if (type
& stack_logging_flag_object
) {
216 unsigned *class = (unsigned *)arg1
;
217 arg1
= arg2
+ class[5]; // corresponds to the instance_size field
218 arg2
= 0; arg3
= 0; type
= stack_logging_type_alloc
;
220 if (type
& stack_logging_flag_cleared
) {
221 type
&= ~stack_logging_flag_cleared
;
223 if (type
& stack_logging_flag_handle
) {
224 if (stack_logging_type_alloc
) {
226 stack_logging_log_stack(stack_logging_type_alloc
, 0, 0, 0, result
, num_hot_to_skip
+1);
227 stack_logging_log_stack(stack_logging_type_alloc
, arg1
, 0, 0, *((int *)result
), num_hot_to_skip
+1);
230 if (stack_logging_type_dealloc
) {
232 stack_logging_log_stack(stack_logging_type_dealloc
, *((int *)arg1
), 0, 0, 0, num_hot_to_skip
+1);
233 stack_logging_log_stack(stack_logging_type_dealloc
, arg1
, 0, 0, 0, num_hot_to_skip
+1);
236 printf("*** Unknown logging type: 0x%x\n", type
);
238 if (type
== stack_logging_flag_set_handle_size
) {
240 // Thanks to a horrible hack, arg3 contains the prvious handle value
241 if (arg3
== *((int *)arg1
)) return;
242 stack_logging_log_stack(stack_logging_type_dealloc
, arg3
, 0, 0, 0, num_hot_to_skip
+1);
243 stack_logging_log_stack(stack_logging_type_alloc
, arg2
, 0, 0, *((int *)arg1
), num_hot_to_skip
+1);
246 if (type
== (stack_logging_type_dealloc
|stack_logging_type_alloc
)) {
247 if (arg1
== result
) return; // realloc had no effect, skipping
249 // realloc(NULL, size) same as malloc(size)
250 type
= stack_logging_type_alloc
; arg1
= arg2
; arg2
= arg3
; arg3
= 0;
252 // realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
253 stack_logging_log_stack(stack_logging_type_dealloc
, arg1
, 0, 0, 0, num_hot_to_skip
+1);
254 stack_logging_log_stack(stack_logging_type_alloc
, arg2
, 0, 0, result
, num_hot_to_skip
+1);
258 if (type
== stack_logging_type_dealloc
) {
260 if (!arg1
) return; // free(nil)
262 prepare_to_log_stack();
263 spin_lock(&stack_logging_the_record_list
->lock
);
264 stack_logging_enable_logging
= 0;
265 stack_logging_the_record_list
= GrowLogRecords(stack_logging_the_record_list
, stack_logging_the_record_list
->num_records
+ 1);
266 rec
= stack_logging_the_record_list
->records
+ stack_logging_the_record_list
->num_records
;
267 // We take care of the common case of alloc-dealloc
268 if (!stack_logging_dontcompact
&& stack_logging_the_record_list
->num_records
&& (type
== stack_logging_type_dealloc
) && arg1
&& ((rec
-1)->type
== stack_logging_type_alloc
) && (arg1
== STACK_LOGGING_DISGUISE((rec
-1)->address
))) {
269 stack_logging_the_record_list
->num_records
--;
270 // printf("Erased previous record in alloc-dealloc sequence\n");
272 unsigned stack_entries
[MAX_NUM_PC
];
275 if (type
== stack_logging_type_dealloc
) {
277 rec
->address
= STACK_LOGGING_DISGUISE(arg1
); // we disguise the address
278 } else if (type
== stack_logging_type_alloc
) {
279 rec
->argument
= arg1
;
280 rec
->address
= STACK_LOGGING_DISGUISE(result
); // we disguise the address
282 rec
->argument
= arg2
;
283 rec
->address
= STACK_LOGGING_DISGUISE(arg1
); // we disguise the address
285 // printf("Before getting samples 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
286 thread_stack_pcs(stack_entries
, MAX_NUM_PC
- 1, &count
);
287 // We put at the bottom of the stack a marker that denotes the thread (+1 for good measure...)
288 stack_entries
[count
++] = (int)pthread_self() + 1;
289 /* now let's unique the sample */
290 // printf("Uniquing 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
291 rec
->uniqued_stack
= stack_logging_get_unique_stack(&stack_logging_the_record_list
->uniquing_table
, &stack_logging_the_record_list
->uniquing_table_num_pages
, stack_entries
, count
, num_hot_to_skip
+2); // we additionally skip the warmest 2 entries that are an artefact of the code
292 stack_logging_the_record_list
->num_records
++;
294 stack_logging_enable_logging
= 1;
295 stack_logging_the_record_list
->lock
= 0;
298 static kern_return_t
default_reader(task_t task
, vm_address_t address
, vm_size_t size
, void **ptr
) {
299 *ptr
= (void *)address
;
303 static kern_return_t
get_remote_records(task_t task
, memory_reader_t reader
, stack_logging_record_list_t
**records
) {
305 vm_address_t
*remote_records_address_ref
;
308 err
= reader(task
, (vm_address_t
)&stack_logging_the_record_list
, sizeof(vm_address_t
), (void **)&remote_records_address_ref
);
310 if (!*remote_records_address_ref
) {
311 // printf("stack_logging: no stack record\n");
314 // printf("stack_logging: stack records at %p\n", (void *)(*remote_records_address_ref));
315 // printf("stack_logging: reading %d bytes\n", sizeof(stack_logging_record_list_t));
316 err
= reader(task
, *remote_records_address_ref
, sizeof(stack_logging_record_list_t
), (void **)records
); // get the list head
318 // printf("stack_logging: overall num bytes = %d\n", records->overall_num_bytes);
319 return reader(task
, *remote_records_address_ref
, (*records
)->overall_num_bytes
, (void **)records
);
322 kern_return_t
stack_logging_get_frames(task_t task
, memory_reader_t reader
, vm_address_t address
, vm_address_t
*stack_frames_buffer
, unsigned max_stack_frames
, unsigned *num_frames
) {
323 stack_logging_record_list_t
*records
;
326 unsigned disguised
= STACK_LOGGING_DISGUISE(address
);
327 if (!reader
) reader
= default_reader
;
329 err
= get_remote_records(task
, reader
, &records
);
330 if (err
|| !records
) return err
;
331 // printf("stack_logging: %d records\n", records->num_records);
333 while (index
< records
->num_records
) {
334 stack_logging_record_t
*record
= records
->records
+ index
;
335 if (record
->address
== disguised
) {
336 return stack_logging_frames_for_uniqued_stack(task
, reader
, record
->uniqued_stack
, stack_frames_buffer
, max_stack_frames
, num_frames
);
340 fprintf(stderr
, "*** stack_logging: no record found for 0x%x\n", address
);
344 kern_return_t
stack_logging_enumerate_records(task_t task
, memory_reader_t reader
, vm_address_t address
, void enumerator(stack_logging_record_t
, void *), void *context
) {
345 stack_logging_record_list_t
*records
;
348 unsigned disguised
= STACK_LOGGING_DISGUISE(address
);
349 if (!reader
) reader
= default_reader
;
350 err
= get_remote_records(task
, reader
, &records
);
351 if (err
|| !records
) return err
;
352 // printf("stack_logging: %d records\n", records->num_records);
354 while (index
< records
->num_records
) {
355 stack_logging_record_t
*record
= records
->records
+ index
;
356 if (!address
|| (record
->address
== disguised
)) enumerator(*record
, context
);
362 kern_return_t
stack_logging_frames_for_uniqued_stack(task_t task
, memory_reader_t reader
, unsigned uniqued_stack
, vm_address_t
*stack_frames_buffer
, unsigned max_stack_frames
, unsigned *num_frames
) {
363 stack_logging_record_list_t
*records
;
364 unsigned *uniquing_table
;
366 if (!reader
) reader
= default_reader
;
368 err
= get_remote_records(task
, reader
, &records
);
369 if (err
|| !records
) return err
;
370 err
= reader(task
, (vm_address_t
)records
->uniquing_table
, records
->uniquing_table_num_pages
* vm_page_size
, (void **)&uniquing_table
);
372 while (max_stack_frames
&& (uniqued_stack
!= -1)) {
374 if ((uniqued_stack
* 2 + 1) * sizeof(unsigned) >= records
->uniquing_table_num_pages
* vm_page_size
) {
375 fprintf(stderr
, "*** stack_logging: Invalid uniqued stack 0x%x", uniqued_stack
);
378 thisPC
= uniquing_table
[uniqued_stack
* 2];
379 uniqued_stack
= uniquing_table
[uniqued_stack
* 2 + 1];
380 if (!thisPC
&& !uniqued_stack
) {
382 fprintf(stderr
, "*** stack_logging: Invalid entry 0x%x", thisPC
);
385 stack_frames_buffer
[0] = thisPC
;
386 stack_frames_buffer
++;