/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/* Bertrand from vmutils -> CF -> System */
#import "stack_logging.h"
#import "malloc_printf.h"

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <pthread.h>
#include <mach/mach.h>
#include <mach/vm_statistics.h>
#import <malloc/malloc.h>
extern void spin_lock(int *);
extern void spin_unlock(int *);
extern void thread_stack_pcs(vm_address_t *, unsigned, unsigned *);
40 static inline void *allocate_pages(unsigned) __attribute__((always_inline
));
41 static inline void *allocate_pages(unsigned bytes
) {
43 if (vm_allocate(mach_task_self(), (vm_address_t
*)&address
, bytes
,
44 VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL
)| TRUE
)) {
45 malloc_printf("*** out of memory while stack logging\n");
48 return (void *)address
;
51 static inline void deallocate_pages(void *, unsigned) __attribute__((always_inline
));
52 static inline void deallocate_pages(void *ptr
, unsigned bytes
) {
53 vm_deallocate(mach_task_self(), (vm_address_t
)ptr
, bytes
);
56 static inline void copy_pages(const void *, void *, unsigned) __attribute__((always_inline
));
57 static inline void copy_pages(const void *source
, void *dest
, unsigned bytes
) {
58 if (vm_copy(mach_task_self(), (vm_address_t
)source
, bytes
, (vm_address_t
)dest
)) memmove(dest
, source
, bytes
);
/***************	Uniquing stack		***********/

/* Maximum number of PCs captured per stack sample. */
/* NOTE(review): the MAX_COLLIDE limit used by enter_pair_in_table() is
   referenced below but its #define was lost in extraction — restore it
   from upstream (historically 8). */
#define MAX_NUM_PC	512
67 static int enter_pair_in_table(unsigned *table
, unsigned numPages
, unsigned *uniquedParent
, unsigned thisPC
) {
68 // uniquedParent is in-out; return 1 is collisions max not exceeded
69 unsigned base
= numPages
* vm_page_size
/ (sizeof(int)*2*2);
70 unsigned hash
= base
+ (((*uniquedParent
) << 4) ^ (thisPC
>> 2)) % (base
- 1); // modulo odd number for hashing
71 unsigned collisions
= MAX_COLLIDE
;
72 while (collisions
--) {
73 unsigned *head
= table
+ hash
*2;
74 if (! head
[0] && !head
[1]) {
75 /* end of chain; store this entry! */
76 /* Note that we need to test for both head[0] and head[1] as (0, -1) is a valid entry */
78 head
[1] = *uniquedParent
;
79 *uniquedParent
= hash
;
82 if ((head
[0] == thisPC
) && (head
[1] == *uniquedParent
)) {
83 /* we found the proper entry, the value for the pair is the entry offset */
84 *uniquedParent
= hash
;
88 if (hash
== base
*2) hash
= base
;
93 unsigned stack_logging_get_unique_stack(unsigned **table
, unsigned *table_num_pages
, unsigned *stack_entries
, unsigned count
, unsigned num_hot_to_skip
) {
94 unsigned uniquedParent
= (unsigned)-1;
95 // we skip the warmest entries that are an artefact of the code
96 while (num_hot_to_skip
--) {
97 if (count
> 0) { stack_entries
++; count
--; }
100 unsigned thisPC
= stack_entries
[count
];
101 while (!enter_pair_in_table(*table
, *table_num_pages
, &uniquedParent
, thisPC
)) {
103 unsigned oldBytes
= (*table_num_pages
) * vm_page_size
;
104 newTable
= allocate_pages(oldBytes
*2);
105 copy_pages(*table
, newTable
, oldBytes
);
106 deallocate_pages(*table
, oldBytes
);
107 *table_num_pages
*= 2;
111 return uniquedParent
;
/***************	Logging stack and arguments		***********/
116 stack_logging_record_list_t
*stack_logging_the_record_list
= NULL
;
118 int stack_logging_enable_logging
= 0;
120 int stack_logging_dontcompact
= 0;
122 static int stack_logging_spin_lock
= 0;
124 static stack_logging_record_list_t
*GrowLogRecords(stack_logging_record_list_t
*records
, unsigned desiredNumRecords
) {
125 stack_logging_record_list_t
*new_records
;
126 unsigned old_size
= records
->overall_num_bytes
;
127 if (desiredNumRecords
*sizeof(stack_logging_record_t
)+sizeof(stack_logging_record_list_t
) < records
->overall_num_bytes
) return records
;
128 records
->overall_num_bytes
+= records
->overall_num_bytes
+ vm_page_size
; // in order to always get an even number of pages
129 new_records
= allocate_pages(records
->overall_num_bytes
);
130 copy_pages(records
, new_records
, old_size
);
131 deallocate_pages(records
, old_size
);
135 static void prepare_to_log_stack(void) {
136 if (!stack_logging_the_record_list
) {
137 unsigned totalSize
= 4 * vm_page_size
;
138 stack_logging_the_record_list
= allocate_pages(totalSize
);
139 memset(stack_logging_the_record_list
, 0, sizeof(stack_logging_record_list_t
));
140 stack_logging_the_record_list
->overall_num_bytes
= totalSize
;
141 stack_logging_the_record_list
->uniquing_table_num_pages
= 128;
142 stack_logging_the_record_list
->uniquing_table
= allocate_pages(stack_logging_the_record_list
->uniquing_table_num_pages
* vm_page_size
);
146 void stack_logging_log_stack(unsigned type
, unsigned arg1
, unsigned arg2
, unsigned arg3
, unsigned result
, unsigned num_hot_to_skip
) {
147 stack_logging_record_t
*rec
;
148 if (!stack_logging_enable_logging
) return;
149 // printf("stack_logging_log_stack 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
150 if (type
& stack_logging_flag_zone
) {
151 // just process it now and be done with it!
152 arg1
= arg2
; arg2
= arg3
; arg3
= 0; type
&= ~stack_logging_flag_zone
;
154 if (type
& stack_logging_flag_calloc
) {
155 // just process it now and be done with it!
156 arg1
*= arg2
; arg2
= arg3
; arg3
= 0; type
&= ~stack_logging_flag_calloc
;
158 if (type
& stack_logging_flag_object
) {
159 unsigned *class = (unsigned *)arg1
;
160 arg1
= arg2
+ class[5]; // corresponds to the instance_size field
161 arg2
= 0; arg3
= 0; type
= stack_logging_type_alloc
;
163 if (type
& stack_logging_flag_cleared
) {
164 type
&= ~stack_logging_flag_cleared
;
166 if (type
& stack_logging_flag_handle
) {
167 if (stack_logging_type_alloc
) {
169 stack_logging_log_stack(stack_logging_type_alloc
, 0, 0, 0, result
, num_hot_to_skip
+1);
170 stack_logging_log_stack(stack_logging_type_alloc
, arg1
, 0, 0, *((int *)result
), num_hot_to_skip
+1);
173 if (stack_logging_type_dealloc
) {
175 stack_logging_log_stack(stack_logging_type_dealloc
, *((int *)arg1
), 0, 0, 0, num_hot_to_skip
+1);
176 stack_logging_log_stack(stack_logging_type_dealloc
, arg1
, 0, 0, 0, num_hot_to_skip
+1);
179 fprintf(stderr
, "*** Unknown logging type: 0x%x\n", type
);
181 if (type
== stack_logging_flag_set_handle_size
) {
183 // Thanks to a horrible hack, arg3 contains the prvious handle value
184 if (arg3
== *((int *)arg1
)) return;
185 stack_logging_log_stack(stack_logging_type_dealloc
, arg3
, 0, 0, 0, num_hot_to_skip
+1);
186 stack_logging_log_stack(stack_logging_type_alloc
, arg2
, 0, 0, *((int *)arg1
), num_hot_to_skip
+1);
189 if (type
== (stack_logging_type_dealloc
|stack_logging_type_alloc
)) {
190 if (arg1
== result
) return; // realloc had no effect, skipping
192 // realloc(NULL, size) same as malloc(size)
193 type
= stack_logging_type_alloc
; arg1
= arg2
; arg2
= arg3
; arg3
= 0;
195 // realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
196 stack_logging_log_stack(stack_logging_type_dealloc
, arg1
, 0, 0, 0, num_hot_to_skip
+1);
197 stack_logging_log_stack(stack_logging_type_alloc
, arg2
, 0, 0, result
, num_hot_to_skip
+1);
201 if (type
== stack_logging_type_dealloc
) {
203 if (!arg1
) return; // free(nil)
205 prepare_to_log_stack();
206 spin_lock(&stack_logging_spin_lock
);
207 stack_logging_the_record_list
= GrowLogRecords(stack_logging_the_record_list
, stack_logging_the_record_list
->num_records
+ 1);
208 rec
= stack_logging_the_record_list
->records
+ stack_logging_the_record_list
->num_records
;
209 // We take care of the common case of alloc-dealloc
210 if (!stack_logging_dontcompact
&& stack_logging_the_record_list
->num_records
&& (type
== stack_logging_type_dealloc
) && arg1
&& ((rec
-1)->type
== stack_logging_type_alloc
) && (arg1
== STACK_LOGGING_DISGUISE((rec
-1)->address
))) {
211 stack_logging_the_record_list
->num_records
--;
212 // printf("Erased previous record in alloc-dealloc sequence\n");
214 unsigned stack_entries
[MAX_NUM_PC
];
217 if (type
== stack_logging_type_dealloc
) {
219 rec
->address
= STACK_LOGGING_DISGUISE(arg1
); // we disguise the address
220 } else if (type
== stack_logging_type_alloc
) {
221 rec
->argument
= arg1
;
222 rec
->address
= STACK_LOGGING_DISGUISE(result
); // we disguise the address
224 rec
->argument
= arg2
;
225 rec
->address
= STACK_LOGGING_DISGUISE(arg1
); // we disguise the address
227 // printf("Before getting samples 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
228 thread_stack_pcs(stack_entries
, MAX_NUM_PC
- 1, &count
);
229 // We put at the bottom of the stack a marker that denotes the thread (+1 for good measure...)
230 stack_entries
[count
++] = (int)pthread_self() + 1;
231 /* now let's unique the sample */
232 // printf("Uniquing 0x%x 0x%x 0x%x 0x%x -> 0x%x\n", type, arg1, arg2, arg3, result);
233 rec
->uniqued_stack
= stack_logging_get_unique_stack(&stack_logging_the_record_list
->uniquing_table
, &stack_logging_the_record_list
->uniquing_table_num_pages
, stack_entries
, count
, num_hot_to_skip
+2); // we additionally skip the warmest 2 entries that are an artefact of the code
234 stack_logging_the_record_list
->num_records
++;
236 spin_unlock(&stack_logging_spin_lock
);
239 static kern_return_t
default_reader(task_t task
, vm_address_t address
, vm_size_t size
, void **ptr
) {
240 *ptr
= (void *)address
;
244 static kern_return_t
get_remote_records(task_t task
, memory_reader_t reader
, stack_logging_record_list_t
**records
) {
246 vm_address_t
*remote_records_address_ref
;
249 err
= reader(task
, (vm_address_t
)&stack_logging_the_record_list
, sizeof(vm_address_t
), (void **)&remote_records_address_ref
);
251 if (!*remote_records_address_ref
) {
252 // printf("stack_logging: no stack record\n");
255 // printf("stack_logging: stack records at %p\n", (void *)(*remote_records_address_ref));
256 // printf("stack_logging: reading %d bytes\n", sizeof(stack_logging_record_list_t));
257 err
= reader(task
, *remote_records_address_ref
, sizeof(stack_logging_record_list_t
), (void **)records
); // get the list head
259 // printf("stack_logging: overall num bytes = %d\n", records->overall_num_bytes);
260 return reader(task
, *remote_records_address_ref
, (*records
)->overall_num_bytes
, (void **)records
);
263 kern_return_t
stack_logging_get_frames(task_t task
, memory_reader_t reader
, vm_address_t address
, vm_address_t
*stack_frames_buffer
, unsigned max_stack_frames
, unsigned *num_frames
) {
264 stack_logging_record_list_t
*records
;
267 unsigned disguised
= STACK_LOGGING_DISGUISE(address
);
268 if (!reader
) reader
= default_reader
;
270 err
= get_remote_records(task
, reader
, &records
);
271 if (err
|| !records
) return err
;
272 // printf("stack_logging: %d records\n", records->num_records);
274 while (index
< records
->num_records
) {
275 stack_logging_record_t
*record
= records
->records
+ index
;
276 if (record
->address
== disguised
) {
277 return stack_logging_frames_for_uniqued_stack(task
, reader
, record
->uniqued_stack
, stack_frames_buffer
, max_stack_frames
, num_frames
);
281 fprintf(stderr
, "*** stack_logging: no record found for 0x%x\n", address
);
285 kern_return_t
stack_logging_enumerate_records(task_t task
, memory_reader_t reader
, vm_address_t address
, void enumerator(stack_logging_record_t
, void *), void *context
) {
286 stack_logging_record_list_t
*records
;
289 unsigned disguised
= STACK_LOGGING_DISGUISE(address
);
290 if (!reader
) reader
= default_reader
;
291 err
= get_remote_records(task
, reader
, &records
);
292 if (err
|| !records
) return err
;
293 // printf("stack_logging: %d records\n", records->num_records);
295 while (index
< records
->num_records
) {
296 stack_logging_record_t
*record
= records
->records
+ index
;
297 if (!address
|| (record
->address
== disguised
)) enumerator(*record
, context
);
303 kern_return_t
stack_logging_frames_for_uniqued_stack(task_t task
, memory_reader_t reader
, unsigned uniqued_stack
, vm_address_t
*stack_frames_buffer
, unsigned max_stack_frames
, unsigned *num_frames
) {
304 stack_logging_record_list_t
*records
;
305 unsigned *uniquing_table
;
307 if (!reader
) reader
= default_reader
;
309 err
= get_remote_records(task
, reader
, &records
);
310 if (err
|| !records
) return err
;
311 err
= reader(task
, (vm_address_t
)records
->uniquing_table
, records
->uniquing_table_num_pages
* vm_page_size
, (void **)&uniquing_table
);
313 while (max_stack_frames
&& (uniqued_stack
!= -1)) {
315 if ((uniqued_stack
* 2 + 1) * sizeof(unsigned) >= records
->uniquing_table_num_pages
* vm_page_size
) {
316 fprintf(stderr
, "*** stack_logging: Invalid uniqued stack 0x%x", uniqued_stack
);
319 thisPC
= uniquing_table
[uniqued_stack
* 2];
320 uniqued_stack
= uniquing_table
[uniqued_stack
* 2 + 1];
321 if (!thisPC
&& !uniqued_stack
) {
323 fprintf(stderr
, "*** stack_logging: Invalid entry 0x%x", thisPC
);
326 stack_frames_buffer
[0] = thisPC
;
327 stack_frames_buffer
++;