/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <errno.h>
#include <pthread.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/sysctl.h>

#include "stack_logging.h"
#include "malloc_printf.h"
#include "_simple.h"	// as included by malloc.c, this defines ASL_LEVEL_INFO
#ifdef TEST_DISK_STACK_LOGGING
#define _malloc_printf fprintf
#define ASL_LEVEL_INFO stderr
#endif
#define STACK_LOGGING_THREAD_HASH_SIZE 2048	// must be an even power of two
#define STACK_LOGGING_MAX_STACK_SIZE 512
#define STACK_LOGGING_BLOCK_WRITING_SIZE 8192
#define STACK_LOGGING_NUMBER_RECENT_BACKTRACES 50
#define STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY 100
#define STACK_LOGGING_MAX_THREAD_COLLISIONS 3
#define STACK_LOGGING_MIN_SAME_FRAMES 3
#define STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED 3
#define STACK_LOGGING_REMOTE_CACHE_DEFAULT_COLLISION_ALLOWANCE 5
#define STACK_LOGGING_REMOTE_CACHE_DEFAULT_NODE_CAPACITY (1 << 14)	// <2mb for 32->32, ~3mb for (32->64 || 64->32), ~4mb for 64->64
#define STACK_LOGGING_REMOTE_CACHE_COLLISION_GROWTH_RATE 3
#define STACK_LOGGING_REMOTE_LINKS_PER_BLOCK (1 << 20)	// this sets a maximum number of malloc/frees that can be read in: 2^30 (1024 blocks of 2^20 links);
	// this means if the .index file is >24gb, remote access will start to fail.
	// note: at this point, the .stack file will probably be ~56gb on top of that and
	// it'll also be using around 20 gb of memory in the analyzing process...
	// all of these are 64-bit stats; the 32-bit analyzing process limits are lower.
	// in short, if you want to analyze a process making > 1 billion malloc/frees
	// (after compaction), bump this number slightly.
#define STACK_LOGGING_FLAGS(longlongvar) (uint8_t)((uint64_t)(longlongvar) >> 56)
#define STACK_LOGGING_OFFSET(longlongvar) ((longlongvar) & 0x00FFFFFFFFFFFFFFull)
#define STACK_LOGGING_OFFSET_AND_FLAGS(longlongvar, realshortvar) (((uint64_t)(longlongvar) & 0x00FFFFFFFFFFFFFFull) | ((uint64_t)(realshortvar) << 56))
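
// Illustrative sketch (not part of the logging path): round-tripping an event's
// file offset and type flags through the macros above. The top 8 bits of
// offset_and_flags hold the flags; the low 56 bits hold the byte offset.
#ifdef TEST_DISK_STACK_LOGGING
#include <assert.h>
static void
offset_and_flags_roundtrip_example(void)
{
	uint64_t offset = 0x123456789ABCull;	// hypothetical stack-file offset
	uint8_t flags = 0x2;			// hypothetical type flags
	uint64_t packed = STACK_LOGGING_OFFSET_AND_FLAGS(offset, flags);
	assert(STACK_LOGGING_OFFSET(packed) == offset);	// low 56 bits survive
	assert(STACK_LOGGING_FLAGS(packed) == flags);	// high 8 bits survive
}
#endif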

#pragma mark - stack_logging_backtrace_event
typedef struct {
	int16_t offset_delta;		// may want to expand this one; should always be < 0.
	uint16_t num_identical_frames;
	uint16_t num_new_hot_frames;	// count of backtrace[]
} stack_logging_backtrace_event;
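
// Grounded in the layout above and the writer below: the on-disk size of one
// backtrace event is this fixed header plus one word per newly-recorded hot
// frame; frames shared with the thread's previous backtrace are not re-stored,
// which is the point of the differencing scheme.
#ifdef TEST_DISK_STACK_LOGGING
static size_t
backtrace_event_disk_size(const stack_logging_backtrace_event *event)
{
	return sizeof(stack_logging_backtrace_event)
			+ (size_t)event->num_new_hot_frames * sizeof(uintptr_t);
}
#endif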

#pragma mark - stack_logging_index_event
typedef struct {
	uintptr_t argument;
	uintptr_t address;
	uint64_t offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event;

#pragma mark - stack_logging_index_event32
typedef struct {
	uint32_t argument;
	uint32_t address;
	uint64_t offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event32;

#pragma mark - stack_logging_index_event64
typedef struct {
	uint64_t argument;
	uint64_t address;
	uint64_t offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event64;

#pragma mark - thread_backtrace_history
// for management of previous backtraces (by thread):
typedef struct {
	vm_address_t thread;
	uint32_t hash_pos;
	uint64_t logging_index;
	int64_t logging_offset;
	uint32_t full_backtrace_countdown;
	uint32_t backtrace_length;
	uintptr_t *backtrace;
} thread_backtrace_history;

#pragma mark - stack_buffer_shared_memory
// for storing/looking up allocations that haven't yet been written to disk; consistent size across 32/64-bit processes.
// It's important that these fields don't change alignment due to the architecture because they may be accessed from an
// analyzing process with a different arch - hence the pragmas.
#pragma pack(push,4)
typedef struct {
	uint64_t start_index_offset;
	uint64_t start_stack_offset;
	uint32_t next_free_index_buffer_offset;
	uint32_t next_free_stack_buffer_offset;
	char index_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
	char stack_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
} stack_buffer_shared_memory;
#pragma pack(pop)
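
// Minimal compile-time sanity check (mirrors the alignment comment above; the
// 4-byte packing value is an assumption): with no padding, 32- and 64-bit
// processes see identical field offsets and total size.
#ifdef TEST_DISK_STACK_LOGGING
typedef char stack_buffer_shared_memory_layout_check[
		(sizeof(stack_buffer_shared_memory) ==
		 2*sizeof(uint64_t) + 2*sizeof(uint32_t) + 2*STACK_LOGGING_BLOCK_WRITING_SIZE) ? 1 : -1];
#endif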

#pragma mark - index_ll_node
// linked-list node in table for allocations of a single address
typedef struct index_ll_node {
	struct index_ll_node *next;
	uint64_t index_file_offset;
} index_ll_node;

#pragma mark - remote_index_node32
// 32-bit target process address slot in table
typedef struct {
	uint32_t address;
	index_ll_node *linked_list;
	index_ll_node *last_link;
} remote_index_node32;

#pragma mark - remote_index_node64
// 64-bit target process variant
typedef struct {
	uint64_t address;
	index_ll_node *linked_list;
	index_ll_node *last_link;
} remote_index_node64;

#pragma mark - remote_index_cache
// for caching index information client-side:
typedef struct {
	size_t cache_size;
	size_t cache_node_capacity;
	uint32_t collision_allowance;
	uint64_t cache_node_count;		// Debug only.
	uint64_t cache_llnode_count;		// Debug only.
	size_t in_use_node_size;		// sizeof(remote_index_node32) || sizeof(remote_index_node64)
	void *table_memory;			// this can be malloced; it's on the client side.
	remote_index_node32 *casted_table32;	// represents table memory as 32-bit.
	remote_index_node64 *casted_table64;	// ditto, 64-bit
	stack_buffer_shared_memory *shmem;	// shared memory
	stack_buffer_shared_memory snapshot;	// memory snapshot of the remote process' shared memory
	uint32_t last_pre_written_index_size;
	uint64_t last_index_file_offset;
	index_ll_node *blocks[1024];
	uint32_t current_block;
	uint32_t next_block_index;
} remote_index_cache;

#pragma mark - remote_task_file_streams
// for reading stack history information from remote processes:
typedef struct {
	task_t remote_task;
	pid_t remote_pid;
	int32_t task_is_64_bit;
	int32_t in_use_count;
	FILE *index_file_stream;
	FILE *stack_file_stream;
	remote_index_cache *cache;
} remote_task_file_streams;

#pragma mark -
#pragma mark Constants

static stack_buffer_shared_memory *pre_write_buffers;
static char *pre_write_backtrace_event_buffer = NULL;
static char *pre_write_index_buffer = NULL;

static OSSpinLock stack_logging_lock = OS_SPINLOCK_INIT;
static uint64_t current_logging_index = 0;
static int64_t total_offset = 0;

// single-thread access variables
static vm_address_t stack_buffer[STACK_LOGGING_NUMBER_RECENT_BACKTRACES][STACK_LOGGING_MAX_STACK_SIZE];
static thread_backtrace_history thread_buffer[STACK_LOGGING_THREAD_HASH_SIZE];
static int32_t current_stack_buffer = 0;
static uintptr_t last_logged_malloc_address = 0;
static uint32_t last_logged_backtrace_offset_diff = 0;
static thread_backtrace_history compaction_saved_differencing_history;

// Constants to define stack logging directory and path names.
// Files will get written to /tmp/stack-logs.<pid>.<progname>.XXXXXX/stack-logs.{index,stacks}
// The directory is securely created with mkdtemp() and the files inside it just have static names for simplicity.
static const char *temporary_directory = "/tmp";
static const char *stack_logging_directory_base_name = "stack-logs.";
static const char *index_file_name = "stack-logs.index";
static const char *stack_file_name = "stack-logs.stacks";

static char stack_logs_directory[PATH_MAX];
static char index_file_path[PATH_MAX];
static char stack_file_path[PATH_MAX];
static int index_file_descriptor = -1;
static int stack_file_descriptor = -1;

// for accessing remote log files
static remote_task_file_streams remote_fds[STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED];
static uint32_t next_remote_task_fd = 0;
static uint32_t remote_task_fd_count = 0;
static OSSpinLock remote_fd_list_lock = OS_SPINLOCK_INIT;

// activation variables
static int logging_use_compaction = 1;	// set this to zero to always disable compaction.

// We set malloc_logger to NULL to disable logging, if we encounter errors
// during file writing.
typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
extern malloc_logger_t *malloc_logger;
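
// Illustrative sketch (an assumption about how the hook is wired up, not code
// used by this file): stack logging starts when malloc_logger is pointed at a
// recorder with this signature; __disk_stack_logging_log_stack below matches
// it, and disable_stack_logging() clears the pointer to stop recording.
#ifdef TEST_DISK_STACK_LOGGING
extern void __disk_stack_logging_log_stack(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip);
static void
install_disk_stack_logger_sketch(void)
{
	malloc_logger = __disk_stack_logging_log_stack;	// start recording
	// ... allocations made here are logged to the index/stack files ...
	malloc_logger = NULL;				// stop recording
}
#endif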

#pragma mark -
#pragma mark Disk Stack Logging

static void delete_log_files(void);	// pre-declare

static void
append_int(char * filename, pid_t pid)
{
	size_t len;
	uint32_t count = 0;
	uint32_t i;
	pid_t value = pid;

	// count the decimal digits in pid
	while (value > 0) {
		value /= 10;
		count++;
	}

	len = strlen(filename);
	filename[len + count] = 0;

	value = pid;
	for(i = 0 ; i < count ; i++) {
		filename[len + count - 1 - i] = '0' + value % 10;
		value /= 10;
	}
}
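
// Usage sketch: append_int appends the decimal digits of pid in place, e.g. a
// buffer holding "stack-logs." becomes "stack-logs.1234". The caller must
// guarantee room for the digits plus the terminator.
#ifdef TEST_DISK_STACK_LOGGING
static void
append_int_example(void)
{
	char name[PATH_MAX] = "stack-logs.";
	append_int(name, (pid_t)1234);	// name is now "stack-logs.1234"
}
#endif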

// If successful, returns path to directory that was created. Otherwise returns NULL.
static char *
create_log_files(void)
{
	pid_t pid = getpid();
	const char *progname = getprogname();
	char path_name[PATH_MAX];
	char *created_directory = NULL;

	// WARNING! use of snprintf can induce malloc() calls
	strlcpy(stack_logs_directory, temporary_directory, PATH_MAX);
	strlcat(stack_logs_directory, "/", PATH_MAX);
	strlcat(stack_logs_directory, stack_logging_directory_base_name, PATH_MAX);
	append_int(stack_logs_directory, pid);
	if (progname && progname[0] != '\0') {
		strlcat(stack_logs_directory, ".", PATH_MAX);
		strlcat(stack_logs_directory, progname, PATH_MAX);
	}
	strlcat(stack_logs_directory, ".XXXXXX", PATH_MAX);

	// Securely make temporary directory for the log files, then create the files.
	if (mkdtemp(stack_logs_directory) == stack_logs_directory) {
		strlcpy(path_name, stack_logs_directory, PATH_MAX);
		strlcat(path_name, "/", PATH_MAX);
		strlcat(path_name, index_file_name, PATH_MAX);
		strlcpy(index_file_path, path_name, PATH_MAX);
		index_file_descriptor = open(path_name, O_WRONLY | O_TRUNC | O_CREAT, 0600);

		strlcpy(path_name, stack_logs_directory, PATH_MAX);
		strlcat(path_name, "/", PATH_MAX);
		strlcat(path_name, stack_file_name, PATH_MAX);
		strlcpy(stack_file_path, path_name, PATH_MAX);
		stack_file_descriptor = open(path_name, O_WRONLY | O_TRUNC | O_CREAT, 0600);

		if (index_file_descriptor == -1 || stack_file_descriptor == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "unable to create stack log files in directory %s\n", stack_logs_directory);
			created_directory = NULL;
		} else {
			_malloc_printf(ASL_LEVEL_INFO, "stack logs being written into %s\n", stack_logs_directory);
			created_directory = stack_logs_directory;
		}
	} else {
		_malloc_printf(ASL_LEVEL_INFO, "unable to create stack log directory %s\n", stack_logs_directory);
		created_directory = NULL;
	}
	return created_directory;
}

// This function may be called from the target process when exiting, or from either the target process or
// a stack log analysis process when reaping orphaned stack log files.
// Returns -1 if the files exist and they couldn't be removed, returns 0 otherwise.
static int
delete_log_files_in_directory(char *logdir)
{
	char path_name[PATH_MAX];
	int unlink_count = 0;
	int failure_count = 0;
	struct stat statbuf;

	if (logdir == NULL || logdir[0] == '\0') return 0;

	strlcpy(path_name, logdir, PATH_MAX);
	strlcat(path_name, "/", PATH_MAX);
	strlcat(path_name, index_file_name, PATH_MAX);
	if (unlink(path_name) == 0) {
		unlink_count++;
	} else if (stat(path_name, &statbuf) == 0) {
		failure_count++;	// the file exists but couldn't be unlinked
	}

	strlcpy(path_name, logdir, PATH_MAX);
	strlcat(path_name, "/", PATH_MAX);
	strlcat(path_name, stack_file_name, PATH_MAX);
	if (unlink(path_name) == 0) {
		unlink_count++;
	} else if (stat(path_name, &statbuf) == 0) {
		failure_count++;	// the file exists but couldn't be unlinked
	}

	if (rmdir(logdir) == -1) failure_count++;

	return (failure_count > 0) ? -1 : 0;
}

// This function will be called from atexit() in the target process.
static void
delete_log_files(void)
{
	if (stack_logs_directory == NULL || stack_logs_directory[0] == '\0') return;

	if (delete_log_files_in_directory(stack_logs_directory) == 0) {
		_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", stack_logs_directory);
		stack_file_path[0] = '\0';
		index_file_path[0] = '\0';
	} else {
		_malloc_printf(ASL_LEVEL_INFO, "unable to delete stack logs from %s\n", stack_logs_directory);
	}
}

static bool
is_process_running(pid_t pid)
{
	struct kinfo_proc kpt[1];
	size_t size = sizeof(struct kinfo_proc);
	int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};

	sysctl(mib, 4, kpt, &size, NULL, 0);	// size is either 1 or 0 entries when we ask for a single pid

	return (size == sizeof(struct kinfo_proc));
}

// The log files can be quite large and aren't too useful after the process that created them no longer exists.
// Normally they should get removed when the process exits, but if the process crashed the log files might remain.
// So, reap any stack log files for processes that no longer exist.
//
// If the remove_for_this_pid flag is set, then any log files that already exist for the current process will also be deleted.
// Those log files are probably the result of this process having been exec'ed from another one (without a fork()).
// The remove_for_this_pid flag is only set for a target process (one just starting logging); a stack logging "client"
// process reaps log files too, but if we're using stack logging on the client process itself, then we don't want to remove
// its own log files.
static void
reap_orphaned_log_files(bool remove_for_this_pid)
{
	DIR *dp;
	struct dirent *entry;
	size_t prefix_length;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];
	pid_t current_pid = getpid();

	if ((dp = opendir(temporary_directory)) == NULL) {
		return;
	}

	strlcpy(prefix_name, stack_logging_directory_base_name, PATH_MAX);
	prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( entry->d_type == DT_DIR && ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) ) {
			long pid = strtol(&entry->d_name[prefix_length], (char **)NULL, 10);
			if ( (! is_process_running(pid)) || (remove_for_this_pid && pid == current_pid) ) {
				strlcpy(pathname, temporary_directory, PATH_MAX);
				strlcat(pathname, "/", PATH_MAX);
				strlcat(pathname, entry->d_name, PATH_MAX);
				if (delete_log_files_in_directory(pathname) == 0) {
					if (remove_for_this_pid && pid == current_pid) {
						_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", pathname);
					} else {
						_malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, pathname);
					}
				}
			}
		}
	}
	closedir(dp);
}

/*
 * Since there are many errors that could cause stack logging to get disabled, this is a convenience method
 * for disabling any future logging in this process and for informing the user.
 */
static void
disable_stack_logging(void)
{
	_malloc_printf(ASL_LEVEL_INFO, "stack logging disabled due to previous errors.\n");
	stack_logging_enable_logging = 0;
	malloc_logger = NULL;
}

/* A wrapper around write() that will try to reopen the index/stack file and
 * write to it if someone closed it underneath us (e.g. the process we just
 * started decided to close all file descriptors except stdin/stdout/stderr). Some
 * programs like to do that and calling abort() on them is rude.
 */
static ssize_t
robust_write(int fd, const void *buf, size_t nbyte) {
	ssize_t written = write(fd, buf, nbyte);
	if (written == -1 && errno == EBADF) {
		char *file_to_reopen = NULL;
		int *fd_to_reset = NULL;

		// descriptor was closed on us. We need to reopen it
		if (fd == index_file_descriptor) {
			file_to_reopen = index_file_path;
			fd_to_reset = &index_file_descriptor;
		}
		else if (fd == stack_file_descriptor) {
			file_to_reopen = stack_file_path;
			fd_to_reset = &stack_file_descriptor;
		} else {
			// We don't know about this file. Return (and abort()).
			_malloc_printf(ASL_LEVEL_INFO, "Unknown file descriptor (it's neither the index file, nor the stacks file)\n");
			return -1;
		}

		// The file *should* already exist. If not, fail.
		fd = open(file_to_reopen, O_WRONLY | O_APPEND);
		if (fd < 3) {
			// If we somehow got stdin/out/err, we need to relinquish them and
			// get another fd.
			int fds_to_close[3] = { 0 };

			while (fd < 3) {
				if (fd == -1) {
					_malloc_printf(ASL_LEVEL_INFO, "unable to re-open stack log file %s\n", file_to_reopen);
					return -1;
				}
				fds_to_close[fd] = 1;
				fd = dup(fd);
			}

			// We have an fd we like. Close the ones we opened.
			if (fds_to_close[0]) close(0);
			if (fds_to_close[1]) close(1);
			if (fds_to_close[2]) close(2);
		}

		*fd_to_reset = fd;
		written = write(fd, buf, nbyte);
	}
	return written;
}

// Flush the pre-write buffers to the log files, creating the files on first use.
static void
flush_data(void)
{
	ssize_t written;	// signed size_t
	size_t remaining;
	char * p;

	if (index_file_descriptor == -1) {
		if (create_log_files() == NULL) {
			return;
		}
	}

	// Write the events before the index so that hopefully the events will be on disk if the index refers to them.
	p = pre_write_backtrace_event_buffer;
	remaining = (size_t)pre_write_buffers->next_free_stack_buffer_offset;
	while (remaining > 0) {
		written = robust_write(stack_file_descriptor, p, remaining);
		if (written == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n", stack_file_path, strerror(errno));
			disable_stack_logging();
			return;
		}
		p += written;
		remaining -= written;
	}
	p = pre_write_index_buffer;
	remaining = (size_t)pre_write_buffers->next_free_index_buffer_offset;
	while (remaining > 0) {
		written = robust_write(index_file_descriptor, p, remaining);
		if (written == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n", index_file_path, strerror(errno));
			disable_stack_logging();
			return;
		}
		p += written;
		remaining -= written;
	}

	pre_write_buffers->start_stack_offset += pre_write_buffers->next_free_stack_buffer_offset;
	pre_write_buffers->start_index_offset += pre_write_buffers->next_free_index_buffer_offset;
	pre_write_buffers->next_free_index_buffer_offset = pre_write_buffers->next_free_stack_buffer_offset = 0;
}

static void
prepare_to_log_stacks(void)
{
	if (!pre_write_buffers) {
		last_logged_malloc_address = 0ul;
		logging_use_compaction = (stack_logging_dontcompact ? 0 : logging_use_compaction);

		// Create a shared memory region to hold the pre-write index and stack buffers. This will allow remote analysis processes to access
		// these buffers to get logs for even the most recent allocations. The remote process will need to pause this process to assure that
		// the contents of these buffers don't change while being inspected.
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_logging_directory_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, getpid());

		int shmid = shm_open(shmem_name_string, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
		if (shmid < 0) {
			// Failed to create shared memory region; turn off stack logging.
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		size_t full_shared_mem_size = sizeof(stack_buffer_shared_memory);
		ftruncate(shmid, (off_t)full_shared_mem_size);
		pre_write_buffers = (stack_buffer_shared_memory *)mmap(0, full_shared_mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);

		if (!pre_write_buffers || pre_write_buffers == (stack_buffer_shared_memory *)MAP_FAILED) {	// mmap reports errors with MAP_FAILED, not NULL
			pre_write_buffers = NULL;
			_malloc_printf(ASL_LEVEL_INFO, "error mapping in shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		// Store and use the buffer offsets in shared memory so that they can be accessed remotely
		pre_write_buffers->start_index_offset = pre_write_buffers->start_stack_offset = 0ull;
		pre_write_buffers->next_free_index_buffer_offset = pre_write_buffers->next_free_stack_buffer_offset = 0;
		pre_write_backtrace_event_buffer = pre_write_buffers->stack_buffer;
		pre_write_index_buffer = pre_write_buffers->index_buffer;

		// malloc() can be called by the following, so these need to be done outside the stack_logging_lock but after the buffers have been set up.
		atexit(delete_log_files);	// atexit() can call malloc()
		reap_orphaned_log_files(true);	// this calls opendir() which calls malloc()

		// this call to flush data ensures that the log files (while possibly empty) exist; analyzing processes will rely on this assumption.
		flush_data();
	}
}

void
__disk_stack_logging_log_stack(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
{
	if (!stack_logging_enable_logging) return;

	// check incoming data
	if (type_flags & stack_logging_type_alloc && type_flags & stack_logging_type_dealloc) {
		// realloc arrives with both flags set; arg2 (size here) holds the old pointer and arg3 (ptr_arg here) holds the new size, so swap them.
		uintptr_t swapper = size;
		size = ptr_arg;
		ptr_arg = swapper;
		if (ptr_arg == return_val) return;	// realloc had no effect, skipping

		if (ptr_arg == 0) {	// realloc(NULL, size) same as malloc(size)
			type_flags ^= stack_logging_type_dealloc;
		} else {
			// realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
			__disk_stack_logging_log_stack(stack_logging_type_dealloc, zone_ptr, ptr_arg, (uintptr_t)0, (uintptr_t)0, num_hot_to_skip + 1);
			__disk_stack_logging_log_stack(stack_logging_type_alloc, zone_ptr, size, (uintptr_t)0, return_val, num_hot_to_skip + 1);
			return;
		}
	}
	if (type_flags & stack_logging_type_dealloc) {
		// for frees, the pointer being freed arrives in the size (arg2) slot
		if (size) {
			ptr_arg = size;
			size = 0;
		} else return;	// free(nil)
	}
	if (type_flags & stack_logging_type_alloc && return_val == 0) return;	// alloc that failed

	// now actually begin
	prepare_to_log_stacks();

	// since there could have been a fatal (to stack logging) error such as the log files not being created, check this variable before continuing
	if (!stack_logging_enable_logging) return;
	vm_address_t self_thread = (vm_address_t)pthread_self();	// use pthread_self() rather than mach_thread_self() to avoid system call

	// lock and enter
	OSSpinLockLock(&stack_logging_lock);

	// compaction: if this is a free of the most recently logged allocation, cancel the pair instead of logging both events.
	if (last_logged_malloc_address && (type_flags & stack_logging_type_dealloc) && STACK_LOGGING_DISGUISE(ptr_arg) == last_logged_malloc_address) {
		// *waves hand* the last allocation never occurred
		pre_write_buffers->next_free_index_buffer_offset -= (uint32_t)sizeof(stack_logging_index_event);
		pre_write_buffers->next_free_stack_buffer_offset -= last_logged_backtrace_offset_diff;
		total_offset -= (int64_t)last_logged_backtrace_offset_diff;
		last_logged_malloc_address = 0ul;

		// not going to subtract from the current_stack_buffer or current_logging_index indecies;
		// there is no intention to restore the previously held stack. the differencing history
		// must be reset to its previous value, though.
		thread_buffer[compaction_saved_differencing_history.hash_pos] = compaction_saved_differencing_history;

		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// locate previous backtrace for this thread
	short difference = 1;

	uint32_t collisions = STACK_LOGGING_MAX_THREAD_COLLISIONS;
	uint32_t hashed_thread = self_thread & (STACK_LOGGING_THREAD_HASH_SIZE-1);
	while (thread_buffer[hashed_thread].thread && thread_buffer[hashed_thread].thread != self_thread) {
		if (--collisions == 0) {
			difference = 0;	// too many collisions; evict this slot and start a fresh history
			break;
		}
		hashed_thread++;
	}

	// gather the backtrace
	uint32_t count;
	thread_stack_pcs(stack_buffer[current_stack_buffer], STACK_LOGGING_MAX_STACK_SIZE, &count);
	stack_buffer[current_stack_buffer][count++] = self_thread + 1;	// stuffing thread # in the coldest slot. Add 1 to match what the old stack logging did.
	num_hot_to_skip += 2;	// also skip this function and the logging hook that called it
	if (count <= num_hot_to_skip) {
		// Oops! Didn't get a valid backtrace from thread_stack_pcs().
		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// easy access variables
	thread_backtrace_history *historical = &thread_buffer[hashed_thread];
	vm_address_t *frames = stack_buffer[current_stack_buffer];

	// increment as necessary
	current_logging_index++;
	current_stack_buffer++;
	if (current_stack_buffer == STACK_LOGGING_NUMBER_RECENT_BACKTRACES) current_stack_buffer = 0;

	// difference (if possible)
	if (historical->logging_index + STACK_LOGGING_NUMBER_RECENT_BACKTRACES <= current_logging_index) difference = 0;
	else if (historical->full_backtrace_countdown == 0) difference = 0;

	uint32_t sameness = 0;
	if (difference) {
		uint32_t old_count = historical->backtrace_length;
		int32_t new_count = (int32_t)count;
		while (old_count-- && new_count-- > (int32_t)num_hot_to_skip) {
			if (historical->backtrace[old_count] == frames[new_count]) sameness++;
			else break;
		}

		if (sameness < STACK_LOGGING_MIN_SAME_FRAMES) {	// failure; pretend nothing was the same
			difference = 0;
		}
	}

	// create events for byte storage
	count -= num_hot_to_skip;
	stack_logging_backtrace_event current_event;
	current_event.num_identical_frames = (difference ? sameness : 0);
	current_event.num_new_hot_frames = (difference ? count - sameness : count);
	current_event.offset_delta = (difference ? historical->logging_offset - total_offset : 0);
	int64_t this_offset_change = sizeof(stack_logging_backtrace_event) + (current_event.num_new_hot_frames * sizeof(uintptr_t));

	stack_logging_index_event current_index;
	if (type_flags & stack_logging_type_alloc) {
		current_index.address = STACK_LOGGING_DISGUISE(return_val);
		current_index.argument = size;
		if (logging_use_compaction) {
			last_logged_malloc_address = current_index.address;	// disguised
			last_logged_backtrace_offset_diff = (uint32_t)this_offset_change;
			compaction_saved_differencing_history = *historical;
		}
	} else {
		current_index.address = STACK_LOGGING_DISGUISE(ptr_arg);
		current_index.argument = 0ul;
		last_logged_malloc_address = 0ul;
	}
	current_index.offset_and_flags = STACK_LOGGING_OFFSET_AND_FLAGS(total_offset, type_flags);

	// prepare for differencing next time
	historical->backtrace = (uintptr_t*)(frames + num_hot_to_skip);
	historical->backtrace_length = count;
	if (difference) historical->full_backtrace_countdown--;
	else historical->full_backtrace_countdown = STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY;
	historical->logging_index = current_logging_index;
	historical->logging_offset = total_offset;
	historical->thread = self_thread;
	historical->hash_pos = hashed_thread;

	// flush the data buffer to disk if necessary
	if (pre_write_buffers->next_free_stack_buffer_offset + this_offset_change >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
		flush_data();
	} else if (pre_write_buffers->next_free_index_buffer_offset + sizeof(stack_logging_index_event) >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
		flush_data();
	}

	// store bytes in buffers
	memcpy(pre_write_index_buffer + pre_write_buffers->next_free_index_buffer_offset, &current_index, sizeof(stack_logging_index_event));
	memcpy(pre_write_backtrace_event_buffer + pre_write_buffers->next_free_stack_buffer_offset, &current_event, sizeof(stack_logging_backtrace_event));
	memcpy(pre_write_backtrace_event_buffer + pre_write_buffers->next_free_stack_buffer_offset + sizeof(stack_logging_backtrace_event), frames + num_hot_to_skip, (size_t)this_offset_change - sizeof(stack_logging_backtrace_event));
	pre_write_buffers->next_free_index_buffer_offset += (uint32_t)sizeof(stack_logging_index_event);
	pre_write_buffers->next_free_stack_buffer_offset += (uint32_t)this_offset_change;
	total_offset += this_offset_change;

	OSSpinLockUnlock(&stack_logging_lock);
}

#pragma mark -
#pragma mark Remote Stack Log Access

#pragma mark - Design notes:

/*

this first one will look through the index, find the "stack_identifier" (i.e. the offset in the log file), and call the third function listed here.
extern kern_return_t __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *num_frames);
	// Gets the last allocation record about address

if !address, will load both index and stack logs and iterate through (expensive)
else will load just index, search for stack, and then use third function here to retrieve. (also expensive)
extern kern_return_t __mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context);
	// Applies enumerator to all records involving address sending context as enumerator's second parameter; if !address, applies enumerator to all records

this function will load the stack file, look for the stack, and follow up to STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY references to reconstruct.
extern kern_return_t __mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count);
	// Given a uniqued_stack fills stack_frames_buffer

*/
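
// Illustrative usage sketch of the analysis API above (a hypothetical caller,
// not part of this file's logic): print every logged event involving one
// address in a target task. mach_stack_logging_record_t and the extern
// prototype come from stack_logging.h.
#ifdef TEST_DISK_STACK_LOGGING
static void
print_stack_logging_record(mach_stack_logging_record_t record, void *context)
{
	fprintf(stderr, "type %u, address 0x%llx, argument %llu, stack id %llu\n",
			record.type_flags, (uint64_t)record.address,
			(uint64_t)record.argument, (uint64_t)record.stack_identifier);
}

static kern_return_t
dump_history_for_address(task_t task, mach_vm_address_t address)
{
	return __mach_stack_logging_enumerate_records(task, address, print_stack_logging_record, NULL);
}
#endif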

#pragma mark - caching

static inline size_t hash_index_32(uint32_t address, size_t max_pos) __attribute__((always_inline));
static inline size_t hash_index_32(uint32_t address, size_t max_pos) {
//	return (((OSSwapInt32(address >> 2) << 3) & 0x96AAAA98) ^ (address >> 2)) % (max_pos-1);
	return (address >> 2) % (max_pos-1);	// simplicity rules.
}

static inline size_t hash_index_64(uint64_t address, size_t max_pos) __attribute__((always_inline));
static inline size_t hash_index_64(uint64_t address, size_t max_pos) {
//	return (size_t)((((OSSwapInt64(address >> 3) << 2) & 0x54AA0A0AAA54ull) ^ (address >> 3)) % (max_pos - 1));
	return (size_t)((address >> 3) % (max_pos-1));	// simplicity rules.
}
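
// Quick sketch of the probing contract these hashes feed (an illustration of
// the lookups below, not code they call): slots are probed linearly from the
// hash position, wrapping at the table's capacity, for at most
// collision_allowance extra steps before the caller gives up or expands.
#ifdef TEST_DISK_STACK_LOGGING
static size_t
linear_probe_sketch(uint64_t address, size_t capacity, uint32_t allowance)
{
	size_t pos = hash_index_64(address, capacity);
	uint32_t collisions;
	for (collisions = 0; collisions <= allowance; collisions++) {
		// ... a real lookup tests slot `pos` for a hit or an empty entry here ...
		pos++;
		if (pos >= capacity) pos = 0;	// wrap around, as the real lookups do
	}
	return pos;
}
#endif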

static void
transfer_node_ll32(remote_index_cache *cache, remote_index_node32 *old_node)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_32(old_node->address, cache->cache_node_capacity);
	do {
		if (cache->casted_table32[pos].address == old_node->address) {	// hit like this shouldn't happen.
			fprintf(stderr, "impossible collision! two address==address lists! (transfer_node_ll32)\n");
			break;
		} else if (cache->casted_table32[pos].address == 0) {	// empty
			cache->casted_table32[pos] = *old_node;
			break;
		} else {	// collision
			pos++;
			if (pos >= cache->cache_node_capacity) pos = 0;
			collisions++;
		}
	} while (collisions <= cache->collision_allowance);

	if (collisions > cache->collision_allowance) {
		fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node_ll32)\n", sizeof(void*)*8);
	}
}

static void
transfer_node_ll64(remote_index_cache *cache, remote_index_node64 *old_node)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_64(old_node->address, cache->cache_node_capacity);
	do {
		if (cache->casted_table64[pos].address == old_node->address) {	// hit!
			fprintf(stderr, "impossible collision! two address==address lists! (transfer_node_ll64)\n");
			break;
		} else if (cache->casted_table64[pos].address == 0) {	// empty
			cache->casted_table64[pos] = *old_node;
			break;
		} else {	// collision
			pos++;
			if (pos >= cache->cache_node_capacity) pos = 0;
			collisions++;
		}
	} while (collisions <= cache->collision_allowance);

	if (collisions > cache->collision_allowance) {
		fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node_ll64)\n", sizeof(void*)*8);
	}
}

static void
expand_cache(remote_index_cache *cache)
{
	// keep old stats
	size_t old_node_capacity = cache->cache_node_capacity;
	uint64_t old_node_count = cache->cache_node_count;
	uint64_t old_llnode_count = cache->cache_llnode_count;
	void *old_table = cache->table_memory;

	// double the size
	cache->cache_size <<= 1;
	cache->cache_node_capacity <<= 1;
	cache->collision_allowance += STACK_LOGGING_REMOTE_CACHE_COLLISION_GROWTH_RATE;
	cache->table_memory = (void*)calloc(cache->cache_node_capacity, cache->in_use_node_size);
	if (cache->casted_table32) cache->casted_table32 = cache->table_memory;
	else cache->casted_table64 = cache->table_memory;

	// repopulate (expensive!)
	size_t i;
	if (cache->casted_table32) {	// if target is 32-bit
		remote_index_node32 *casted_old_table = (remote_index_node32 *)old_table;
		for (i = 0; i < old_node_capacity; i++) {
			if (casted_old_table[i].address) {
				transfer_node_ll32(cache, &casted_old_table[i]);
			}
		}
	} else {	// target is 64-bit
		remote_index_node64 *casted_old_table = (remote_index_node64 *)old_table;
		for (i = 0; i < old_node_capacity; i++) {
			if (casted_old_table[i].address) {
				transfer_node_ll64(cache, &casted_old_table[i]);
			}
		}
	}

	cache->cache_node_count = old_node_count;
	cache->cache_llnode_count = old_llnode_count;
	free(old_table);
//	printf("cache expanded to %0.2f mb (eff: %3.0f%%, capacity: %lu, nodes: %llu, llnodes: %llu)\n", ((float)(cache->cache_size))/(1 << 20), ((float)(cache->cache_node_count)*100.0)/((float)(cache->cache_node_capacity)), cache->cache_node_capacity, cache->cache_node_count, cache->cache_llnode_count);
}

static void
insert_node32(remote_index_cache *cache, uint32_t address, uint64_t index_file_offset)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_32(address, cache->cache_node_capacity);

	// grab a fresh linked-list node from the current block, adding a block if needed
	if (cache->next_block_index >= STACK_LOGGING_REMOTE_LINKS_PER_BLOCK) {
		cache->next_block_index = 0;
		cache->current_block++;
		cache->blocks[cache->current_block] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
/*		printf("node buffer added. total nodes: %ul (%u buffers, %0.2f mb)\n", STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*(cache->current_block+1),
			cache->current_block+1, ((float)(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node)*(cache->current_block+1)))/(1 << 20)); */
	}
	index_ll_node *new_node = &cache->blocks[cache->current_block][cache->next_block_index++];
	new_node->index_file_offset = index_file_offset;
	new_node->next = NULL;

	bool inserted = false;
	while (!inserted) {
		if (cache->casted_table32[pos].address == address) {	// hit!
			cache->casted_table32[pos].last_link->next = new_node;	// insert at end
			cache->casted_table32[pos].last_link = new_node;
			inserted = true;
			break;
		} else if (cache->casted_table32[pos].address == 0) {	// empty
			cache->casted_table32[pos].address = address;
			cache->casted_table32[pos].linked_list = new_node;
			cache->casted_table32[pos].last_link = new_node;
			cache->cache_node_count++;
			inserted = true;
			break;
		}
		pos++;
		if (pos >= cache->cache_node_capacity) pos = 0;
		collisions++;
		if (collisions > cache->collision_allowance) {
			// the table is too crowded; grow it and restart the probe for this address
			expand_cache(cache);
			pos = hash_index_32(address, cache->cache_node_capacity);
			collisions = 0;
		}
	}

	cache->cache_llnode_count++;
}

static void
insert_node64(remote_index_cache *cache, uint64_t address, uint64_t index_file_offset)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_64(address, cache->cache_node_capacity);

	// grab a fresh linked-list node from the current block, adding a block if needed
	if (cache->next_block_index >= STACK_LOGGING_REMOTE_LINKS_PER_BLOCK) {
		cache->next_block_index = 0;
		cache->current_block++;
		cache->blocks[cache->current_block] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
	}
	index_ll_node *new_node = &cache->blocks[cache->current_block][cache->next_block_index++];
	new_node->index_file_offset = index_file_offset;
	new_node->next = NULL;

	bool inserted = false;
	while (!inserted) {
		if (cache->casted_table64[pos].address == address) {	// hit!
			cache->casted_table64[pos].last_link->next = new_node;	// insert at end
			cache->casted_table64[pos].last_link = new_node;
			inserted = true;
			break;
		} else if (cache->casted_table64[pos].address == 0) {	// empty
			cache->casted_table64[pos].address = address;
			cache->casted_table64[pos].linked_list = new_node;
			cache->casted_table64[pos].last_link = new_node;
			cache->cache_node_count++;
			inserted = true;
			break;
		}
		pos++;
		if (pos >= cache->cache_node_capacity) pos = 0;
		collisions++;
		if (collisions > cache->collision_allowance) {
			// the table is too crowded; grow it and restart the probe for this address
			expand_cache(cache);
			pos = hash_index_64(address, cache->cache_node_capacity);
			collisions = 0;
		}
	}

	cache->cache_llnode_count++;
}

static void
update_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	remote_index_cache *cache = descriptors->cache;

	// create from scratch if necessary.
	if (!cache) {
		descriptors->cache = cache = (remote_index_cache*)calloc((size_t)1, sizeof(remote_index_cache));
		cache->cache_node_capacity = STACK_LOGGING_REMOTE_CACHE_DEFAULT_NODE_CAPACITY;
		cache->collision_allowance = STACK_LOGGING_REMOTE_CACHE_DEFAULT_COLLISION_ALLOWANCE;
		cache->cache_node_count = cache->cache_llnode_count = 0;
		cache->last_index_file_offset = 0;
		cache->next_block_index = 0;
		cache->current_block = 0;
		cache->blocks[0] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
		cache->in_use_node_size = (descriptors->task_is_64_bit ? sizeof(remote_index_node64) : sizeof(remote_index_node32));
		cache->cache_size = cache->cache_node_capacity*cache->in_use_node_size;
		cache->table_memory = (void*)calloc(cache->cache_node_capacity, cache->in_use_node_size);
		if (descriptors->task_is_64_bit) cache->casted_table64 = (remote_index_node64*)(cache->table_memory);
		else cache->casted_table32 = (remote_index_node32*)(cache->table_memory);

		// now map in the shared memory, if possible
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_logging_directory_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, descriptors->remote_pid);

		int shmid = shm_open(shmem_name_string, O_RDWR, S_IRUSR | S_IWUSR);
		if (shmid >= 0) {
			cache->shmem = mmap(0, sizeof(stack_buffer_shared_memory), PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
			if (cache->shmem == MAP_FAILED) cache->shmem = NULL;	// mmap reports errors with MAP_FAILED, not NULL
		}

		if (shmid < 0 || cache->shmem == NULL) {
			// failed to connect to the shared memory region; warn and continue.
			_malloc_printf(ASL_LEVEL_INFO, "warning: unable to connect to remote process' shared memory; allocation histories may not be up-to-date.\n");
		}
	}

	// suspend and see how much updating there is to do. there are three scenarios, listed below
	bool update_snapshot = false;
	if (descriptors->remote_task != mach_task_self()) {
		task_suspend(descriptors->remote_task);
	}

	struct stat file_statistics;
	fstat(fileno(descriptors->index_file_stream), &file_statistics);
	size_t read_size = (descriptors->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	uint64_t read_this_update = 0;

	// the delta indecies is a complex number; there are three cases:
	// 1. there is no shared memory (or we can't connect); diff the last_index_file_offset from the filesize.
	// 2. the only updates have been in shared memory; disk file didn't change at all. delta_indecies should be zero, scan snapshot only.
	// 3. the updates have flushed to disk, meaning that most likely there is new data on disk that wasn't read from shared memory.
	//    correct delta_indecies for the pre-scanned amount and read the new data from disk and shmem.
	uint64_t delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
	uint32_t last_snapshot_scan_index = 0;
	if (delta_indecies && cache->shmem) {
		// case 3: add cache scanned to known from disk and recalc
		cache->last_index_file_offset += cache->snapshot.next_free_index_buffer_offset;
		delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
		update_snapshot = true;
	} else if (cache->shmem) {
		// case 2: set the last snapshot scan count so we don't rescan something we've seen.
		last_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
	}

	// no update necessary for the file; check if need a snapshot.
	if (delta_indecies == 0) {
		if (cache->shmem && !update_snapshot) {
			update_snapshot = (cache->shmem->next_free_index_buffer_offset != cache->snapshot.next_free_index_buffer_offset);
		}
	}

	// if a snapshot is necessary, memcpy from remote frozen process' memory
	// note: there were two ways to do this - spin lock or suspend. suspend allows us to
	// analyze processes even if they were artificially suspended. with a lock, there'd be
	// worry that the target was suspended with the lock taken.
	if (update_snapshot) {
		memcpy(&cache->snapshot, cache->shmem, sizeof(stack_buffer_shared_memory));
	}

	// resume the target quickly; all further work is on our own copies.
	if (descriptors->remote_task != mach_task_self()) {
		task_resume(descriptors->remote_task);
	}

	if (!update_snapshot && delta_indecies == 0) return;	// absolutely no updating needed.

	FILE *the_index = (descriptors->index_file_stream);

	// prepare for the read; target process could be 32 or 64 bit.
	stack_logging_index_event32 *target_32_index = NULL;
	stack_logging_index_event64 *target_64_index = NULL;

	// perform the update from the file
	uint32_t i;
	if (delta_indecies) {
		char bufferSpace[4096];	// 4 kb
		target_32_index = (stack_logging_index_event32 *)bufferSpace;
		target_64_index = (stack_logging_index_event64 *)bufferSpace;
		size_t number_slots = (size_t)(4096/read_size);

		size_t read_count = 0;
		if (fseeko(the_index, (off_t)(cache->last_index_file_offset), SEEK_SET)) {
			fprintf(stderr, "error while attempting to cache information from remote stack index file. (update_cache_for_file_streams)\n");
		}
		off_t current_index_position = cache->last_index_file_offset;
		do {
			number_slots = MIN(delta_indecies - read_this_update, number_slots);
			read_count = fread(bufferSpace, read_size, number_slots, the_index);
			if (descriptors->task_is_64_bit) {
				for (i = 0; i < read_count; i++) {
					insert_node64(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			} else {
				for (i = 0; i < read_count; i++) {
					insert_node32(cache, STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			}
		} while (read_count);

		if (read_this_update < delta_indecies) {
			fprintf(stderr, "insufficient data in remote stack index file; expected more records.\n");
		}
		cache->last_index_file_offset += read_this_update * read_size;
	}

	if (update_snapshot) {
		target_32_index = (stack_logging_index_event32 *)(cache->snapshot.index_buffer);
		target_64_index = (stack_logging_index_event64 *)(cache->snapshot.index_buffer);

		uint32_t free_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
		off_t current_index_position = cache->snapshot.start_index_offset;
		if (descriptors->task_is_64_bit) {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node64(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		} else {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node32(cache, STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		}
	}
}
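
// Illustrative sketch of the three cases described in the comment inside
// update_cache_for_file_streams() (a hypothetical helper, not called below):
// decide how many index records must be read from disk given the file size,
// the offset already consumed, and any snapshot bytes scanned earlier.
#ifdef TEST_DISK_STACK_LOGGING
static uint64_t
delta_indecies_sketch(uint64_t file_size, uint64_t last_read_offset,
		uint32_t snapshot_bytes_already_scanned, bool have_shmem, size_t read_size)
{
	uint64_t delta = (file_size - last_read_offset) / read_size;	// case 1: no shared memory
	if (delta && have_shmem) {
		// case 3: records scanned from the snapshot have since been flushed to
		// disk; skip past them before re-reading the file.
		last_read_offset += snapshot_bytes_already_scanned;
		delta = (file_size - last_read_offset) / read_size;
	}
	// case 2 (shared memory only, no new disk data) leaves delta == 0.
	return delta;
}
#endif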

static void
destroy_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	uint32_t i;
	for (i = 0; i <= descriptors->cache->current_block; i++) {
		free(descriptors->cache->blocks[i]);	// clears the linked list nodes.
	}
	if (descriptors->cache->shmem) {
		munmap(descriptors->cache->shmem, sizeof(stack_buffer_shared_memory));
	}
	free(descriptors->cache->table_memory);
	free(descriptors->cache);
	descriptors->cache = NULL;
}

#pragma mark - internal

// In the stack log analysis process, find the stack logging files for target process <pid>
// by scanning the temporary directory for directory entries with names of the form "stack-logs.<pid>."
// If we find such a directory then open the stack logging files in there.
static void
open_log_files(pid_t pid, remote_task_file_streams *this_task_streams)
{
	DIR *dp;
	struct dirent *entry;
	size_t prefix_length;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];

	reap_orphaned_log_files(false);	// reap any left-over log files (for non-existent processes, but not for this analysis process)

	if ((dp = opendir(temporary_directory)) == NULL) {
		return;
	}

	// It's OK to use snprintf in this routine since it should only be called by the clients
	// of stack logging, and thus calls to malloc are OK.
	snprintf(prefix_name, PATH_MAX, "%s%d.", stack_logging_directory_base_name, pid);	// make sure to use "%s%d." rather than just "%s%d" to match the whole pid
	prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) {
			snprintf(pathname, PATH_MAX, "%s/%s/%s", temporary_directory, entry->d_name, index_file_name);
			this_task_streams->index_file_stream = fopen(pathname, "r");

			snprintf(pathname, PATH_MAX, "%s/%s/%s", temporary_directory, entry->d_name, stack_file_name);
			this_task_streams->stack_file_stream = fopen(pathname, "r");

			break;
		}
	}
	closedir(dp);
}

static remote_task_file_streams*
retain_file_streams_for_task(task_t task)
{
	OSSpinLockLock(&remote_fd_list_lock);

	// see if they're already in use
	uint32_t i;
	for (i = 0; i < remote_task_fd_count; i++) {
		if (remote_fds[i].remote_task == task) {
			remote_fds[i].in_use_count++;
			OSSpinLockUnlock(&remote_fd_list_lock);
			return &remote_fds[i];
		}
	}

	// if the table is full, evict the least-recently-assigned unused slot
	uint32_t failures = 0;
	if (remote_task_fd_count == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
		while (remote_fds[next_remote_task_fd].in_use_count > 0) {
			next_remote_task_fd++;
			if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
			failures++;
			if (failures >= STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
				OSSpinLockUnlock(&remote_fd_list_lock);
				return NULL;
			}
		}
		fclose(remote_fds[next_remote_task_fd].index_file_stream);
		fclose(remote_fds[next_remote_task_fd].stack_file_stream);
		destroy_cache_for_file_streams(&remote_fds[next_remote_task_fd]);
	}

	pid_t pid;
	kern_return_t err = pid_for_task(task, &pid);
	if (err != KERN_SUCCESS) {
		OSSpinLockUnlock(&remote_fd_list_lock);
		return NULL;
	}

	remote_task_file_streams *this_task_streams = &remote_fds[next_remote_task_fd];

	open_log_files(pid, this_task_streams);

	// check if opens failed
	if (this_task_streams->index_file_stream == NULL || this_task_streams->stack_file_stream == NULL) {
		if (this_task_streams->index_file_stream) fclose(this_task_streams->index_file_stream);
		if (this_task_streams->stack_file_stream) fclose(this_task_streams->stack_file_stream);
		OSSpinLockUnlock(&remote_fd_list_lock);
		return NULL;
	}

	// check if target pid is running 64-bit
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
	struct kinfo_proc processInfo;
	size_t bufsize = sizeof(processInfo);
	if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, (size_t)0) == 0 && bufsize > 0) {
		this_task_streams->task_is_64_bit = processInfo.kp_proc.p_flag & P_LP64;
	} else {
		this_task_streams->task_is_64_bit = 0;
	}

	// otherwise set vars and go
	this_task_streams->in_use_count = 1;
	this_task_streams->remote_task = task;
	this_task_streams->remote_pid = pid;
	next_remote_task_fd++;
	if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
	remote_task_fd_count = MIN(remote_task_fd_count + 1, STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED);

	OSSpinLockUnlock(&remote_fd_list_lock);
	return this_task_streams;
}

static void
release_file_streams_for_task(task_t task)
{
	OSSpinLockLock(&remote_fd_list_lock);

	// decrement in-use count
	uint32_t i;
	for (i = 0; i < remote_task_fd_count; i++) {
		if (remote_fds[i].remote_task == task) {
			remote_fds[i].in_use_count--;
			break;
		}
	}

	OSSpinLockUnlock(&remote_fd_list_lock);
}

#pragma mark - extern

kern_return_t
__mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}

	update_cache_for_file_streams(remote_fd);

	uint32_t collisions = 0;
	uint64_t located_file_position = 0;
	bool found = false;
	size_t hash = 0;
	if (remote_fd->task_is_64_bit) {
		hash = hash_index_64(address, remote_fd->cache->cache_node_capacity);
		do {
			if (remote_fd->cache->casted_table64[hash].address == address) {	// hit!
				located_file_position = remote_fd->cache->casted_table64[hash].last_link->index_file_offset;
				found = true;
				break;
			} else if (remote_fd->cache->casted_table64[hash].address == 0) {	// failure!
				break;
			}
			collisions++;
			hash++;
			if (hash >= remote_fd->cache->cache_node_capacity) hash = 0;
		} while (collisions <= remote_fd->cache->collision_allowance);
	} else {
		hash = hash_index_32((uint32_t)address, remote_fd->cache->cache_node_capacity);
		do {
			if (remote_fd->cache->casted_table32[hash].address == (uint32_t)address) {	// hit!
				located_file_position = remote_fd->cache->casted_table32[hash].last_link->index_file_offset;
				found = true;
				break;
			} else if (remote_fd->cache->casted_table32[hash].address == 0) {	// failure!
				break;
			}
			collisions++;
			hash++;
			if (hash >= remote_fd->cache->cache_node_capacity) hash = 0;
		} while (collisions <= remote_fd->cache->collision_allowance);
	}

	if (found) {
		// prepare for the read; target process could be 32 or 64 bit.
		stack_logging_index_event32 *target_32_index = NULL;
		stack_logging_index_event64 *target_64_index = NULL;

		if (located_file_position >= remote_fd->cache->last_index_file_offset) {
			// must be in shared memory
			if (remote_fd->cache->shmem) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64 *)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32 *)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}
		} else {
			// it's written to disk
			char bufferSpace[128];

			size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
			fseeko(remote_fd->index_file_stream, (off_t)located_file_position, SEEK_SET);
			size_t read_count = fread(bufferSpace, read_size, (size_t)1, remote_fd->index_file_stream);
			if (read_count) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64 *)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32 *)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}
		}
	}

	release_file_streams_for_task(task);

	if (!found) {
		return KERN_FAILURE;
	}

	return __mach_stack_logging_frames_for_uniqued_stack(task, located_file_position, stack_frames_buffer, max_stack_frames, count);
}
1354 __mach_stack_logging_enumerate_records(task_t task
, mach_vm_address_t address
, void enumerator(mach_stack_logging_record_t
, void *), void *context
)
1356 remote_task_file_streams
*remote_fd
= retain_file_streams_for_task(task
);
1357 if (remote_fd
== NULL
) {
1358 return KERN_FAILURE
;
1361 bool reading_all_addresses
= (address
== 0 ? true : false);
1362 mach_stack_logging_record_t pass_record
;
1363 kern_return_t err
= KERN_SUCCESS
;
1365 if (reading_all_addresses
) { // just stupidly read the index file from disk
1367 // update (read index file once and only once)
1368 update_cache_for_file_streams(remote_fd
);
1370 FILE *the_index
= (remote_fd
->index_file_stream
);
1372 // prepare for the read; target process could be 32 or 64 bit.
1373 char bufferSpace
[2048]; // 2 kb
1374 stack_logging_index_event32
*target_32_index
= (stack_logging_index_event32
*)bufferSpace
;
1375 stack_logging_index_event64
*target_64_index
= (stack_logging_index_event64
*)bufferSpace
;
1376 uint32_t target_addr_32
= (uint32_t)STACK_LOGGING_DISGUISE((uint32_t)address
);
1377 uint64_t target_addr_64
= STACK_LOGGING_DISGUISE((uint64_t)address
);
1378 size_t read_size
= (remote_fd
->task_is_64_bit
? sizeof(stack_logging_index_event64
) : sizeof(stack_logging_index_event32
));
1379 size_t number_slots
= (size_t)(2048/read_size
);
1380 uint64_t total_slots
= remote_fd
->cache
->last_index_file_offset
/ read_size
;
1382 // perform the search
1383 size_t read_count
= 0;
1384 int64_t current_file_offset
= 0;
1387 // at this point, we need to read index events; read them from the file until it's necessary to grab them from the shared memory snapshot
1388 // and crop file reading to the point where we last scanned
1389 number_slots
= (size_t)MIN(number_slots
, total_slots
);
1391 // if out of file to read (as of the time we entered this function), try to use shared memory snapshot
1392 if (number_slots
== 0) {
1393 if (remote_fd
->cache
->shmem
&& remote_fd
->cache
->snapshot
.start_index_offset
+ remote_fd
->cache
->snapshot
.next_free_index_buffer_offset
> (uint64_t)current_file_offset
) {
1394 // use shared memory
1395 target_32_index
= (stack_logging_index_event32
*)remote_fd
->cache
->snapshot
.index_buffer
;
1396 target_64_index
= (stack_logging_index_event64
*)remote_fd
->cache
->snapshot
.index_buffer
;
1397 read_count
= (uint32_t)(remote_fd
->cache
->snapshot
.start_index_offset
+ remote_fd
->cache
->snapshot
.next_free_index_buffer_offset
- current_file_offset
) / read_size
;
1398 current_file_offset
+= read_count
* read_size
;
1403 // get and save index (enumerator could modify)
1404 fseeko(the_index
, current_file_offset
, SEEK_SET
);
1405 read_count
= fread(bufferSpace
, read_size
, number_slots
, the_index
);
1406 current_file_offset
= ftello(the_index
);
1407 total_slots
-= read_count
;
1410 if (remote_fd
->task_is_64_bit
) {
1411 for (i
= 0; i
< read_count
; i
++) {
1412 if (reading_all_addresses
|| target_64_index
[i
].address
== target_addr_64
) {
1413 pass_record
.address
= STACK_LOGGING_DISGUISE(target_64_index
[i
].address
);
1414 pass_record
.argument
= target_64_index
[i
].argument
;
1415 pass_record
.stack_identifier
= STACK_LOGGING_OFFSET(target_64_index
[i
].offset_and_flags
);
1416 pass_record
.type_flags
= STACK_LOGGING_FLAGS(target_64_index
[i
].offset_and_flags
);
1417 enumerator(pass_record
, context
);
1421 for (i
= 0; i
< read_count
; i
++) {
1422 if (reading_all_addresses
|| target_32_index
[i
].address
== target_addr_32
) {
1423 pass_record
.address
= STACK_LOGGING_DISGUISE(target_32_index
[i
].address
);
1424 pass_record
.argument
= target_32_index
[i
].argument
;
1425 pass_record
.stack_identifier
= STACK_LOGGING_OFFSET(target_32_index
[i
].offset_and_flags
);
1426 pass_record
.type_flags
= STACK_LOGGING_FLAGS(target_32_index
[i
].offset_and_flags
);
1427 enumerator(pass_record
, context
);
1431 } while (read_count
);
1433 } else { // searching for a single address' history
1435 // update (read index file once and only once)
1436 update_cache_for_file_streams(remote_fd
);
1438 // get linked-list of events
1439 uint32_t collisions
= 0;
1440 uint64_t located_file_position
= 0;
1442 index_ll_node
*index_position_linked_list
= NULL
;
1443 if (remote_fd
->task_is_64_bit
) {
1444 hash
= hash_index_64(address
, remote_fd
->cache
->cache_node_capacity
);
1446 if (remote_fd
->cache
->casted_table64
[hash
].address
== address
) { // hit!
1447 index_position_linked_list
= remote_fd
->cache
->casted_table64
[hash
].linked_list
;
1449 } else if (remote_fd
->cache
->casted_table64
[hash
].address
== 0) { // failure!
1453 if (hash
>= remote_fd
->cache
->cache_node_capacity
) hash
= 0;
1454 } while (collisions
<= remote_fd
->cache
->collision_allowance
);
1456 hash
= hash_index_32((uint32_t)address
, remote_fd
->cache
->cache_node_capacity
);
1458 if (remote_fd
->cache
->casted_table32
[hash
].address
== (uint32_t)address
) { // hit!
1459 index_position_linked_list
= remote_fd
->cache
->casted_table32
[hash
].linked_list
;
1461 } else if (remote_fd
->cache
->casted_table32
[hash
].address
== 0) { // failure!
1465 if (hash
>= remote_fd
->cache
->cache_node_capacity
) hash
= 0;
1466 } while (collisions
<= remote_fd
->cache
->collision_allowance
);
1469 // if we got something, run it
1470 char bufferSpace
[128];
1471 size_t read_count
= 0;
1472 stack_logging_index_event32
*target_32_index
= (stack_logging_index_event32
*)bufferSpace
;
1473 stack_logging_index_event64
*target_64_index
= (stack_logging_index_event64
*)bufferSpace
;
1474 size_t read_size
= (remote_fd
->task_is_64_bit
? sizeof(stack_logging_index_event64
) : sizeof(stack_logging_index_event32
));
		while (index_position_linked_list) {
			located_file_position = index_position_linked_list->index_file_offset;

			if (located_file_position >= remote_fd->cache->snapshot.start_index_offset) {
				if (remote_fd->cache->shmem && located_file_position >= remote_fd->cache->snapshot.start_index_offset && remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset > (uint64_t)located_file_position) {
					// use shared memory
					target_32_index = (stack_logging_index_event32*)(remote_fd->cache->snapshot.index_buffer + located_file_position - remote_fd->cache->snapshot.start_index_offset);
					target_64_index = (stack_logging_index_event64*)target_32_index;
					read_count = 1;
				} else {
					read_count = 0;
				}
			} else {
				fseeko(remote_fd->index_file_stream, (off_t)located_file_position, SEEK_SET);
				read_count = fread(bufferSpace, read_size, (size_t)1, remote_fd->index_file_stream);
			}

			if (read_count) {
				if (remote_fd->task_is_64_bit) {
					pass_record.address = STACK_LOGGING_DISGUISE(target_64_index[0].address);
					pass_record.argument = target_64_index[0].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_64_index[0].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_64_index[0].offset_and_flags);
					enumerator(pass_record, context);
				} else {
					pass_record.address = STACK_LOGGING_DISGUISE(target_32_index[0].address);
					pass_record.argument = target_32_index[0].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_32_index[0].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_32_index[0].offset_and_flags);
					enumerator(pass_record, context);
				}
			}
			index_position_linked_list = index_position_linked_list->next;
		}
	}

	release_file_streams_for_task(task);
	return KERN_SUCCESS;
}
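// Reconstructs a full backtrace from the uniqued, delta-encoded events in the
// stack file. Each backtrace event stores only its fresh hot frames
// (num_new_hot_frames) plus a count of frames shared with an earlier event
// (num_identical_frames); the loop below follows offset_delta links from event
// to event, copying the still-needed hot frames until total_frames are read.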
kern_return_t
__mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}
	// prepare for initial read
	FILE *stack_fd = NULL;
	stack_fd = (remote_fd->stack_file_stream);
	char bytes_buffer[16];
	stack_logging_backtrace_event *target_stack_event = (stack_logging_backtrace_event*)bytes_buffer;
	size_t read_size = sizeof(stack_logging_backtrace_event);
	size_t read_count = 0;
	off_t reading_offset = (off_t)stack_identifier;

	// get a temporary spot for the backtrace frames to go and reference the stack space such that the reference
	// can be later pointed at the shared memory snapshot and data read from there.
	uint64_t temp_frames_buffer[STACK_LOGGING_MAX_STACK_SIZE];
	uint64_t *big_frames = (uint64_t*)temp_frames_buffer;
	uint32_t *small_frames = (uint32_t*)temp_frames_buffer;
	size_t target_frame_size = (remote_fd->task_is_64_bit ? sizeof(uint64_t) : sizeof(uint32_t));
	char *snapshot_backtrace_location = NULL;

	int32_t total_frames = -1;
	int32_t hot_frames_read = 0;
	size_t new_hot_frames = 0;
	int32_t number_needed_hot_frames_in_event;
	size_t number_hot_frames_to_skip;
	int done = 0;
	size_t i;
	bool skip_file_read;
	while (!done) {
		// not in cache; read record: from disk if possible, shared memory snapshot if necessary.
		if (remote_fd->cache->shmem && reading_offset >= (off_t)(remote_fd->cache->snapshot.start_stack_offset)) {
			// must read from shared memory; the record isn't on disk yet
			snapshot_backtrace_location = (remote_fd->cache->snapshot.stack_buffer + (reading_offset - remote_fd->cache->snapshot.start_stack_offset));
			*target_stack_event = *(stack_logging_backtrace_event*)snapshot_backtrace_location;
			big_frames = (uint64_t*)(snapshot_backtrace_location + sizeof(stack_logging_backtrace_event));
			small_frames = (uint32_t*)big_frames;
			skip_file_read = true;
		} else {
			// the record's on disk
			i = fseeko(stack_fd, reading_offset, SEEK_SET);
			if (i != 0) break; // unable to seek to the target position
			read_count = fread(target_stack_event, read_size, (size_t)1, stack_fd);
			if (read_count == 0) break;

			big_frames = (uint64_t*)temp_frames_buffer;
			small_frames = (uint32_t*)temp_frames_buffer;
			skip_file_read = false;
		}
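		// skip_file_read records where the frames live: in the shared-memory
		// snapshot they sit immediately after the event header (big_frames already
		// points at them), whereas an on-disk record's frames still have to be
		// fread() into temp_frames_buffer below.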
		if (total_frames < 0) {
			total_frames = target_stack_event->num_new_hot_frames + target_stack_event->num_identical_frames;
			if (total_frames > (int32_t)max_stack_frames) break; // don't know what to do with this; we'll just KERN_FAILURE.
		}
		// do the math to find how many frames to apply from previous event
		new_hot_frames = target_stack_event->num_new_hot_frames;
		number_needed_hot_frames_in_event = total_frames - hot_frames_read - target_stack_event->num_identical_frames;
		number_hot_frames_to_skip = new_hot_frames - number_needed_hot_frames_in_event;
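		// Worked example (hypothetical counts, for illustration only): with
		// total_frames == 20, hot_frames_read == 5, and an event carrying
		// num_new_hot_frames == 6 and num_identical_frames == 12, the event must
		// supply 20 - 5 - 12 == 3 frames, so the hottest 6 - 3 == 3 of its stored
		// frames are skipped because they were already copied from previously
		// visited (hotter) events.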
		// read and apply the important frames of this one
		if (number_needed_hot_frames_in_event > 0) {
			if (!skip_file_read) {
				read_count = fread(temp_frames_buffer, target_frame_size, new_hot_frames, stack_fd);
				if (read_count < new_hot_frames) break;
			}

			if (remote_fd->task_is_64_bit) {
				for (i = 0; i < number_needed_hot_frames_in_event; i++) {
					stack_frames_buffer[hot_frames_read++] = big_frames[i+number_hot_frames_to_skip];
				}
			} else {
				for (i = 0; i < number_needed_hot_frames_in_event; i++) {
					stack_frames_buffer[hot_frames_read++] = small_frames[i+number_hot_frames_to_skip];
				}
			}
		}

		reading_offset += target_stack_event->offset_delta;
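		// offset_delta chains this event to the earlier one holding the next
		// colder frames; a zero delta before the stack is complete means the
		// chain is broken, which the check below reports as an error.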
		if (hot_frames_read == total_frames) done = 1;
		else if (target_stack_event->offset_delta == 0) {
			fprintf(stderr, "incomplete stack record (identifier: 0x%qx)\n", reading_offset);
			break;
		}
	}

	release_file_streams_for_task(task);

	if (done) {
		*count = hot_frames_read;
		return KERN_SUCCESS;
	}

	return KERN_FAILURE;
}
#ifdef TEST_DISK_STACK_LOGGING

// cc -o stack_logging_disk stack_logging_disk.c -DTEST_DISK_STACK_LOGGING

#include <sys/wait.h>
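// The test below exercises log-file lifecycle handling: children that exit()
// cleanly should have their logs cleaned up, children torn down with _exit()
// leave logs behind, and the final pass in the master process should reap
// whatever remains.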
int
main()
{
	int status;
	int i;

	fprintf(stderr, "master test process is %d\n", getpid());
	fprintf(stderr, "sizeof stack_buffer: %lu\n", (unsigned long)sizeof(stack_buffer));
	fprintf(stderr, "sizeof thread_buffer: %lu\n", (unsigned long)sizeof(thread_buffer));
	fprintf(stderr, "sizeof stack_logs_directory: %lu\n", (unsigned long)sizeof(stack_logs_directory));
	fprintf(stderr, "sizeof remote_fds: %lu\n", (unsigned long)sizeof(remote_fds));
	fprintf(stderr, "address of pre_write_backtrace_event_buffer: %p\n", &pre_write_backtrace_event_buffer);
	fprintf(stderr, "address of logging_use_compaction: %p\n", &logging_use_compaction);
	// fprintf(stderr, "size of all global data: %d\n", (logging_use_compaction) - (pre_write_backtrace_event_buffer) + sizeof(logging_use_compaction));
	// create a few child processes and exit them cleanly so their logs should get cleaned up
	fprintf(stderr, "\ncreating child processes and exiting cleanly\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			fprintf(stderr, "exiting child process %d\n", getpid());
			exit(0);
		}
		wait(&status);
	}
	// create a few child processes and abruptly _exit them, leaving their logs around
	fprintf(stderr, "\ncreating child processes and exiting abruptly, leaving logs around\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			fprintf(stderr, "exiting child process %d\n", getpid());
			_exit(0);
		}
		wait(&status);
	}
	// this should reap any remaining logs
	fprintf(stderr, "\nexiting master test process %d\n", getpid());
	return 0;
}

#endif