/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>	// for bool
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <pthread.h>
#include <errno.h>
#include "stack_logging.h"
#include "malloc_printf.h"
#include "_simple.h"	// as included by malloc.c, this defines ASL_LEVEL_INFO

#pragma mark -
#pragma mark Defines

#ifdef TEST_DISK_STACK_LOGGING
#define _malloc_printf fprintf
#undef ASL_LEVEL_INFO
#define ASL_LEVEL_INFO stderr
#endif
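
// Under TEST_DISK_STACK_LOGGING, the redefinitions above turn each
// _malloc_printf(ASL_LEVEL_INFO, ...) call in this file into fprintf(stderr, ...),
// so the diagnostics below still compile and print in a standalone test build.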

#define STACK_LOGGING_THREAD_HASH_SIZE 2048 // must be an even power of two
#define STACK_LOGGING_MAX_STACK_SIZE 512
#define STACK_LOGGING_BLOCK_WRITING_SIZE 8192
#define STACK_LOGGING_NUMBER_RECENT_BACKTRACES 50
#define STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY 100
#define STACK_LOGGING_MAX_THREAD_COLLISIONS 3
#define STACK_LOGGING_MIN_SAME_FRAMES 3
#define STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED 3
#define STACK_LOGGING_REMOTE_CACHE_DEFAULT_COLLISION_ALLOWANCE 5
#define STACK_LOGGING_REMOTE_CACHE_DEFAULT_NODE_CAPACITY (1 << 14) // parenthesized so the macro expands safely; <2mb for 32->32, ~3mb for (32->64 || 64->32), ~4mb for 64->64
#define STACK_LOGGING_REMOTE_CACHE_COLLISION_GROWTH_RATE 3
#define STACK_LOGGING_REMOTE_LINKS_PER_BLOCK (1 << 20) // this sets a maximum number of malloc/frees that can be read in: 2^30
			// (1024 blocks of 2^20 links each); this means if the .index file is >24gb, remote access will start to fail.
			// note: at this point, the .stack file will probably be ~56gb on top of that and
			// it'll also be using around 20 gb of memory in the analyzing process...
			// all of these are 64-bit stats; the 32-bit analyzing process limits are lower.
			// in short, if you want to analyze a process making > 1 billion malloc/frees
			// (after compaction), bump this number slightly.

#pragma mark -
#pragma mark Macros

#define STACK_LOGGING_FLAGS(longlongvar) (uint8_t)((uint64_t)(longlongvar) >> 56)
#define STACK_LOGGING_OFFSET(longlongvar) ((longlongvar) & 0x00FFFFFFFFFFFFFFull)
#define STACK_LOGGING_OFFSET_AND_FLAGS(longlongvar, realshortvar) (((uint64_t)(longlongvar) & 0x00FFFFFFFFFFFFFFull) | ((uint64_t)(realshortvar) << 56))
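
// Worked example (hypothetical numbers): STACK_LOGGING_OFFSET_AND_FLAGS(0x1234, 0x04)
// yields 0x0400000000001234ull; STACK_LOGGING_FLAGS() recovers 0x04 from the top 8 bits
// and STACK_LOGGING_OFFSET() recovers 0x1234 from the low 56 bits. Offsets therefore
// have 56 usable bits; the flags live in the top byte.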

#pragma mark -
#pragma mark Types

#pragma mark - stack_logging_backtrace_event
typedef struct {
	int16_t offset_delta; // may want to expand this one; should always be < 0.
	uint16_t num_identical_frames;
	uint16_t num_new_hot_frames; // count of backtrace[]
} stack_logging_backtrace_event;
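
// On disk, each backtrace event is this fixed-size header followed immediately by
// num_new_hot_frames uintptr_t frames (see the memcpy calls at the end of
// __disk_stack_logging_log_stack below); offset_delta points back to this thread's
// previous event, with which num_identical_frames cold frames are shared.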

#pragma mark - stack_logging_index_event
typedef struct {
	uintptr_t argument;
	uintptr_t address;
	uint64_t offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event;

#pragma mark - stack_logging_index_event32
typedef struct {
	uint32_t argument;
	uint32_t address;
	uint64_t offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event32;

#pragma mark - stack_logging_index_event64
typedef struct {
	uint64_t argument;
	uint64_t address;
	uint64_t offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event64;

#pragma mark - thread_backtrace_history
// for management of previous backtraces (by thread):
typedef struct {
	vm_address_t thread;
	uint32_t hash_pos;
	uint64_t logging_index;
	int64_t logging_offset;
	uint32_t full_backtrace_countdown;
	uint32_t backtrace_length;
	uintptr_t *backtrace;
} thread_backtrace_history;

#pragma mark - stack_buffer_shared_memory
// for storing/looking up allocations that haven't yet been written to disk; consistent size across 32/64-bit processes.
// It's important that these fields don't change alignment due to the architecture because they may be accessed from an
// analyzing process with a different arch - hence the pragmas.
#pragma pack(push,4)
typedef struct {
	uint64_t start_index_offset;
	uint64_t start_stack_offset;
	uint32_t next_free_index_buffer_offset;
	uint32_t next_free_stack_buffer_offset;
	char index_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
	char stack_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
} stack_buffer_shared_memory;
#pragma pack(pop)

#pragma mark - index_ll_node
// linked-list node in table for allocations of a single address
typedef struct index_ll_node {
	struct index_ll_node *next;
	uint64_t index_file_offset;
} index_ll_node;

#pragma mark - remote_index_node32
// 32-bit target process address slot in table
typedef struct {
	uint32_t address;
	index_ll_node *linked_list;
	index_ll_node *last_link;
} remote_index_node32;

#pragma mark - remote_index_node64
// 64-bit target process variant
typedef struct {
	uint64_t address;
	index_ll_node *linked_list;
	index_ll_node *last_link;
} remote_index_node64;

#pragma mark - remote_index_cache
// for caching index information client-side:
typedef struct {
	size_t cache_size;
	size_t cache_node_capacity;
	uint32_t collision_allowance;
	uint64_t cache_node_count; // Debug only.
	uint64_t cache_llnode_count; // Debug only.
	size_t in_use_node_size; // sizeof(remote_index_node32) || sizeof(remote_index_node64)
	void *table_memory; // this can be malloced; it's on the client side.
	remote_index_node32 *casted_table32; // represents table memory as 32-bit.
	remote_index_node64 *casted_table64; // ditto, 64-bit
	stack_buffer_shared_memory *shmem; // shared memory
	stack_buffer_shared_memory snapshot; // memory snapshot of the remote process' shared memory
	uint32_t last_pre_written_index_size;
	uint64_t last_index_file_offset;
	index_ll_node *blocks[1024];
	uint32_t current_block;
	uint32_t next_block_index;
} remote_index_cache;
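
// Note: blocks[1024] of STACK_LOGGING_REMOTE_LINKS_PER_BLOCK (2^20) links each is where
// the 2^30 total-links ceiling mentioned at that #define comes from.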

#pragma mark - remote_task_file_streams
// for reading stack history information from remote processes:
typedef struct {
	task_t remote_task;
	pid_t remote_pid;
	int32_t task_is_64_bit;
	int32_t in_use_count;
	FILE *index_file_stream;
	FILE *stack_file_stream;
	remote_index_cache *cache;
} remote_task_file_streams;

#pragma mark -
#pragma mark Constants

static stack_buffer_shared_memory *pre_write_buffers;
static char *pre_write_backtrace_event_buffer = NULL;
static char *pre_write_index_buffer = NULL;

static OSSpinLock stack_logging_lock = OS_SPINLOCK_INIT;
static uint64_t current_logging_index = 0;
static int64_t total_offset = 0;

// single-thread access variables
static vm_address_t stack_buffer[STACK_LOGGING_NUMBER_RECENT_BACKTRACES][STACK_LOGGING_MAX_STACK_SIZE];
static thread_backtrace_history thread_buffer[STACK_LOGGING_THREAD_HASH_SIZE];
static int32_t current_stack_buffer = 0;
static uintptr_t last_logged_malloc_address = 0;
static uint32_t last_logged_backtrace_offset_diff = 0;
static thread_backtrace_history compaction_saved_differencing_history;

// Constants to define stack logging directory and path names.
// Files will get written to /tmp/stack-logs.<pid>.<progname>.XXXXXX/stack-logs.{index,stacks}
// The directory is securely created with mkdtemp() and the files inside it just have static names for simplicity.
static const char *temporary_directory = "/tmp";
static const char *stack_logging_directory_base_name = "stack-logs.";
static const char *index_file_name = "stack-logs.index";
static const char *stack_file_name = "stack-logs.stacks";

static char stack_logs_directory[PATH_MAX];
static char index_file_path[PATH_MAX];
static char stack_file_path[PATH_MAX];
static int index_file_descriptor = -1;
static int stack_file_descriptor = -1;

// for accessing remote log files
static remote_task_file_streams remote_fds[STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED];
static uint32_t next_remote_task_fd = 0;
static uint32_t remote_task_fd_count = 0;
static OSSpinLock remote_fd_list_lock = OS_SPINLOCK_INIT;

// activation variables

static int logging_use_compaction = 1; // set this to zero to always disable compaction.

// We set malloc_logger to NULL to disable logging if we encounter errors
// during file writing.
typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
extern malloc_logger_t *malloc_logger;

#pragma mark -
#pragma mark Disk Stack Logging

static void delete_log_files(void);	// pre-declare

static void
append_int(char * filename, pid_t pid)
{
	unsigned int value;
	size_t len;
	unsigned int i;
	unsigned int count;

	len = strlen(filename);

	count = 0;
	value = pid;
	while (value > 0) {
		value /= 10;
		count ++;
	}

	filename[len + count] = 0;

	value = pid;
	for(i = 0 ; i < count ; i ++) {
		filename[len + count - 1 - i] = '0' + value % 10;
		value /= 10;
	}
}
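
// Example (hypothetical buffer contents): calling append_int(buf, 123) when buf holds
// "stack-logs." leaves "stack-logs.123". This hand-rolled itoa is used instead of
// snprintf(), which can itself call malloc() (see the warning in create_log_files below).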

// If successful, returns path to directory that was created. Otherwise returns NULL.
static char *
create_log_files(void)
{
	pid_t pid = getpid();
	const char *progname = getprogname();
	char path_name[PATH_MAX];
	char *created_directory = NULL;

	// WARNING! use of snprintf can induce malloc() calls
	strlcpy(stack_logs_directory, temporary_directory, PATH_MAX);
	strlcat(stack_logs_directory, "/", PATH_MAX);
	strlcat(stack_logs_directory, stack_logging_directory_base_name, PATH_MAX);
	append_int(stack_logs_directory, pid);
	if (progname && progname[0] != '\0') {
		strlcat(stack_logs_directory, ".", PATH_MAX);
		strlcat(stack_logs_directory, progname, PATH_MAX);
	}
	strlcat(stack_logs_directory, ".XXXXXX", PATH_MAX);

	// Securely make temporary directory for the log files, then create the files.
	if (mkdtemp(stack_logs_directory) == stack_logs_directory) {
		strlcpy(path_name, stack_logs_directory, PATH_MAX);
		strlcat(path_name, "/", PATH_MAX);
		strlcat(path_name, index_file_name, PATH_MAX);
		strlcpy(index_file_path, path_name, PATH_MAX);
		index_file_descriptor = open(path_name, O_WRONLY | O_TRUNC | O_CREAT, 0600);

		strlcpy(path_name, stack_logs_directory, PATH_MAX);
		strlcat(path_name, "/", PATH_MAX);
		strlcat(path_name, stack_file_name, PATH_MAX);
		strlcpy(stack_file_path, path_name, PATH_MAX);
		stack_file_descriptor = open(path_name, O_WRONLY | O_TRUNC | O_CREAT, 0600);

		if (index_file_descriptor == -1 || stack_file_descriptor == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "unable to create stack log files in directory %s\n", stack_logs_directory);
			delete_log_files();
			created_directory = NULL;
		} else {
			_malloc_printf(ASL_LEVEL_INFO, "stack logs being written into %s\n", stack_logs_directory);
			created_directory = stack_logs_directory;
		}
	} else {
		_malloc_printf(ASL_LEVEL_INFO, "unable to create stack log directory %s\n", stack_logs_directory);
		created_directory = NULL;
	}
	return created_directory;
}

// This function may be called from the target process when exiting, or from either the target process or
// a stack log analysis process, when reaping orphaned stack log files.
// Returns -1 if the files exist and they couldn't be removed, returns 0 otherwise.
static int
delete_log_files_in_directory(char *logdir)
{
	char path_name[PATH_MAX];
	int unlink_count = 0;
	int failure_count = 0;
	struct stat statbuf;

	if (logdir == NULL || logdir[0] == '\0') return 0;

	strlcpy(path_name, logdir, PATH_MAX);
	strlcat(path_name, "/", PATH_MAX);
	strlcat(path_name, index_file_name, PATH_MAX);
	if (unlink(path_name) == 0) {
		unlink_count++;
	} else if (stat(path_name, &statbuf) == 0) {
		failure_count++;
	}

	strlcpy(path_name, logdir, PATH_MAX);
	strlcat(path_name, "/", PATH_MAX);
	strlcat(path_name, stack_file_name, PATH_MAX);
	if (unlink(path_name) == 0) {
		unlink_count++;
	} else if (stat(path_name, &statbuf) == 0) {
		failure_count++;
	}

	if (rmdir(logdir) == -1) failure_count++;

	return (failure_count > 0) ? -1 : 0;
}

// This function will be called from atexit() in the target process.
static void
delete_log_files(void)
{
	if (stack_logs_directory == NULL || stack_logs_directory[0] == '\0') return;

	if (delete_log_files_in_directory(stack_logs_directory) == 0) {
		_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", stack_logs_directory);
		stack_file_path[0] = '\0';
		index_file_path[0] = '\0';
	} else {
		_malloc_printf(ASL_LEVEL_INFO, "unable to delete stack logs from %s\n", stack_logs_directory);
	}
}

static bool
is_process_running(pid_t pid)
{
	struct kinfo_proc kpt[1];
	size_t size = sizeof(struct kinfo_proc);
	int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};

	sysctl(mib, 4, kpt, &size, NULL, 0); // size is either 1 or 0 entries when we ask for a single pid

	return (size==sizeof(struct kinfo_proc));
}

// The log files can be quite large and aren't too useful after the process that created them no longer exists.
// Normally they should get removed when the process exits, but if the process crashed the log files might remain.
// So, reap any stack log files for processes that no longer exist.
//
// If the remove_for_this_pid flag is set, then any log files that already exist for the current process will also be deleted.
// Those log files are probably the result of this process having been exec'ed from another one (without a fork()).
// The remove_for_this_pid flag is only set for a target process (one just starting logging); a stack logging "client"
// process reaps log files too, but if we're using stack logging on the client process itself, then we don't want to remove
// its own log files.
static void
reap_orphaned_log_files(bool remove_for_this_pid)
{
	DIR *dp;
	struct dirent *entry;
	int prefix_length;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];
	pid_t current_pid = getpid();

	if ((dp = opendir(temporary_directory)) == NULL) {
		return;
	}

	strlcpy(prefix_name, stack_logging_directory_base_name, PATH_MAX);
	prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( entry->d_type == DT_DIR && ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) ) {
			long pid = strtol(&entry->d_name[prefix_length], (char **)NULL, 10);
			if ( (! is_process_running(pid)) || (remove_for_this_pid && pid == current_pid) ) {
				strlcpy(pathname, temporary_directory, PATH_MAX);
				strlcat(pathname, "/", PATH_MAX);
				strlcat(pathname, entry->d_name, PATH_MAX);
				if (delete_log_files_in_directory(pathname) == 0) {
					if (remove_for_this_pid && pid == current_pid) {
						_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", pathname);
					} else {
						_malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, pathname); // pid is a long, hence %ld
					}
				}
			}
		}
	}
	closedir(dp);
}

/*
 * Since there are many errors that could cause stack logging to get disabled, this is a convenience method
 * for disabling any future logging in this process and for informing the user.
 */
static void
disable_stack_logging(void)
{
	_malloc_printf(ASL_LEVEL_INFO, "stack logging disabled due to previous errors.\n");
	stack_logging_enable_logging = 0;
	malloc_logger = NULL;
}

/* A wrapper around write() that will try to reopen the index/stack file and
 * write to it if someone closed it underneath us (e.g. the process we just
 * started decides to close all file descriptors except stdin/stdout/stderr). Some
 * programs like to do that and calling abort() on them is rude.
 */
static ssize_t
robust_write(int fd, const void *buf, size_t nbyte) {
	ssize_t written = write(fd, buf, nbyte); // errno is declared by <errno.h> above
	if (written == -1 && errno == EBADF) {
		char *file_to_reopen = NULL;
		int *fd_to_reset = NULL;

		// descriptor was closed on us. We need to reopen it
		if (fd == index_file_descriptor) {
			file_to_reopen = index_file_path;
			fd_to_reset = &index_file_descriptor;
		}
		else if (fd == stack_file_descriptor) {
			file_to_reopen = stack_file_path;
			fd_to_reset = &stack_file_descriptor;
		} else {
			// We don't know about this file. Return (and abort()).
			_malloc_printf(ASL_LEVEL_INFO, "Unknown file descriptor (it's neither the index file, nor the stacks file)\n");
			return -1;
		}

		// The file *should* already exist. If not, fail.
		fd = open(file_to_reopen, O_WRONLY | O_APPEND);
		if (fd < 3) {
			// If we somehow got stdin/out/err, we need to relinquish them and
			// get another fd.
			int fds_to_close[3] = { 0 };
			while (fd < 3) {
				if (fd == -1) {
					_malloc_printf(ASL_LEVEL_INFO, "unable to re-open stack log file %s\n", file_to_reopen);
					delete_log_files();
					return -1;
				}
				fds_to_close[fd] = 1;
				fd = dup(fd);
			}

			// We have an fd we like. Close the ones we opened.
			if (fds_to_close[0]) close(0);
			if (fds_to_close[1]) close(1);
			if (fds_to_close[2]) close(2);
		}

		*fd_to_reset = fd;
		written = write(fd, buf, nbyte);
	}
	return written;
}

static void
flush_data(void)
{
	ssize_t written; // signed size_t
	size_t remaining;
	char * p;

	if (index_file_descriptor == -1) {
		if (create_log_files() == NULL) {
			return;
		}
	}

	// Write the events before the index so that hopefully the events will be on disk if the index refers to them.
	p = pre_write_backtrace_event_buffer;
	remaining = (size_t)pre_write_buffers->next_free_stack_buffer_offset;
	while (remaining > 0) {
		written = robust_write(stack_file_descriptor, p, remaining);
		if (written == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n", stack_file_path, strerror(errno));
			disable_stack_logging();
			return;
		}
		p += written;
		remaining -= written;
	}
	p = pre_write_index_buffer;
	remaining = (size_t)pre_write_buffers->next_free_index_buffer_offset;
	while (remaining > 0) {
		written = robust_write(index_file_descriptor, p, remaining);
		if (written == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n", index_file_path, strerror(errno));
			disable_stack_logging();
			return;
		}
		p += written;
		remaining -= written;
	}

	pre_write_buffers->start_stack_offset += pre_write_buffers->next_free_stack_buffer_offset;
	pre_write_buffers->start_index_offset += pre_write_buffers->next_free_index_buffer_offset;
	pre_write_buffers->next_free_index_buffer_offset = pre_write_buffers->next_free_stack_buffer_offset = 0;
}

static void
prepare_to_log_stacks(void)
{
	if (!pre_write_buffers) {
		last_logged_malloc_address = 0ul;
		logging_use_compaction = (stack_logging_dontcompact ? 0 : logging_use_compaction);

		// Create a shared memory region to hold the pre-write index and stack buffers. This will allow remote analysis processes to access
		// these buffers to get logs for even the most recent allocations. The remote process will need to pause this process to assure that
		// the contents of these buffers don't change while being inspected.
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_logging_directory_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, getpid());

		int shmid = shm_open(shmem_name_string, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
		if (shmid < 0) {
			// Failed to create shared memory region; turn off stack logging.
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		size_t full_shared_mem_size = sizeof(stack_buffer_shared_memory);
		ftruncate(shmid, (off_t)full_shared_mem_size);
		pre_write_buffers = (stack_buffer_shared_memory*)mmap(0, full_shared_mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
		close(shmid);

		if (pre_write_buffers == MAP_FAILED || !pre_write_buffers) { // mmap() reports failure as MAP_FAILED, not NULL
			pre_write_buffers = NULL;
			_malloc_printf(ASL_LEVEL_INFO, "error mapping in shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		// Store and use the buffer offsets in shared memory so that they can be accessed remotely
		pre_write_buffers->start_index_offset = pre_write_buffers->start_stack_offset = 0ull;
		pre_write_buffers->next_free_index_buffer_offset = pre_write_buffers->next_free_stack_buffer_offset = 0;
		pre_write_backtrace_event_buffer = pre_write_buffers->stack_buffer;
		pre_write_index_buffer = pre_write_buffers->index_buffer;

		// malloc() can be called by the following, so these need to be done outside the stack_logging_lock but after the buffers have been set up.
		atexit(delete_log_files);	// atexit() can call malloc()
		reap_orphaned_log_files(true);	// this calls opendir() which calls malloc()

		// this call to flush data ensures that the log files (while possibly empty) exist; analyzing processes will rely on this assumption.
		flush_data();
	}
}

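// Each event logged below appends one stack_logging_index_event to the index buffer and
// one stack_logging_backtrace_event header plus its new hot frames to the stack buffer;
// the index entry's offset_and_flags packs the event's offset into the stack log together
// with the alloc/dealloc type flags, via STACK_LOGGING_OFFSET_AND_FLAGS above.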
void
__disk_stack_logging_log_stack(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
{
	if (!stack_logging_enable_logging) return;

	// check incoming data
	if (type_flags & stack_logging_type_alloc && type_flags & stack_logging_type_dealloc) {
		uintptr_t swapper = size;
		size = ptr_arg;
		ptr_arg = swapper;
		if (ptr_arg == return_val) return; // realloc had no effect, skipping

		if (ptr_arg == 0) { // realloc(NULL, size) same as malloc(size)
			type_flags ^= stack_logging_type_dealloc;
		} else {
			// realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
			__disk_stack_logging_log_stack(stack_logging_type_dealloc, zone_ptr, ptr_arg, (uintptr_t)0, (uintptr_t)0, num_hot_to_skip + 1);
			__disk_stack_logging_log_stack(stack_logging_type_alloc, zone_ptr, size, (uintptr_t)0, return_val, num_hot_to_skip + 1);
			return;
		}
	}
	if (type_flags & stack_logging_type_dealloc) {
		if (size) {
			ptr_arg = size;
			size = 0;
		} else return; // free(nil)
	}
	if (type_flags & stack_logging_type_alloc && return_val == 0) return; // alloc that failed

	type_flags &= 0x7;

	// now actually begin
	prepare_to_log_stacks();

	// since there could have been a fatal (to stack logging) error such as the log files not being created, check this variable before continuing
	if (!stack_logging_enable_logging) return;
	vm_address_t self_thread = (vm_address_t)pthread_self(); // use pthread_self() rather than mach_thread_self() to avoid system call

	// lock and enter
	OSSpinLockLock(&stack_logging_lock);

	// compaction
	if (last_logged_malloc_address && (type_flags & stack_logging_type_dealloc) && STACK_LOGGING_DISGUISE(ptr_arg) == last_logged_malloc_address) {
		// *waves hand* the last allocation never occurred
		pre_write_buffers->next_free_index_buffer_offset -= (uint32_t)sizeof(stack_logging_index_event);
		pre_write_buffers->next_free_stack_buffer_offset -= last_logged_backtrace_offset_diff;
		total_offset -= (int64_t)last_logged_backtrace_offset_diff;
		last_logged_malloc_address = 0ul;

		// not going to subtract from the current_stack_buffer or current_logging_index indices;
		// there is no intention to restore the previously held stack. the differencing history
		// must be reset to its previous value, though.
		thread_buffer[compaction_saved_differencing_history.hash_pos] = compaction_saved_differencing_history;

		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// locate previous backtrace for this thread
	short difference = 1;

	uint32_t collisions = STACK_LOGGING_MAX_THREAD_COLLISIONS;
	uint32_t hashed_thread = self_thread & (STACK_LOGGING_THREAD_HASH_SIZE-1);
	while (thread_buffer[hashed_thread].thread && thread_buffer[hashed_thread].thread != self_thread) {
		if (--collisions == 0) {
			difference = 0;
			break;
		}
		hashed_thread = (hashed_thread + 1) & (STACK_LOGGING_THREAD_HASH_SIZE-1); // wrap so probing can't run past the end of thread_buffer[]
	}

	// gather stack
	uint32_t count;
	thread_stack_pcs(stack_buffer[current_stack_buffer], STACK_LOGGING_MAX_STACK_SIZE, &count);
	stack_buffer[current_stack_buffer][count++] = self_thread + 1; // stuffing thread # in the coldest slot. Add 1 to match what the old stack logging did.
	num_hot_to_skip += 2;
	if (count <= num_hot_to_skip) {
		// Oops! Didn't get a valid backtrace from thread_stack_pcs().
		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// easy access variables
	thread_backtrace_history *historical = &thread_buffer[hashed_thread];
	vm_address_t *frames = stack_buffer[current_stack_buffer];

	// increment as necessary
	current_logging_index++;
	current_stack_buffer++;
	if (current_stack_buffer == STACK_LOGGING_NUMBER_RECENT_BACKTRACES) current_stack_buffer = 0;

	// difference (if possible)
	if (historical->logging_index + STACK_LOGGING_NUMBER_RECENT_BACKTRACES <= current_logging_index) difference = 0;
	else if (historical->full_backtrace_countdown == 0) difference = 0;

	uint32_t sameness = 0;
	if (difference) {
		uint32_t old_count = historical->backtrace_length;
		int32_t new_count = (int32_t)count;
		while (old_count-- && new_count-- > (int32_t)num_hot_to_skip) {
			if (historical->backtrace[old_count] == frames[new_count]) sameness++;
			else break;
		}

		if (sameness < STACK_LOGGING_MIN_SAME_FRAMES) { // failure; pretend nothing was the same
			difference = 0;
		}
	}

	// create events for byte storage
	count -= num_hot_to_skip;
	stack_logging_backtrace_event current_event;
	current_event.num_identical_frames = (difference ? sameness : 0);
	current_event.num_new_hot_frames = (difference ? count - sameness : count);
	current_event.offset_delta = (difference ? historical->logging_offset - total_offset : 0);
	int64_t this_offset_change = sizeof(stack_logging_backtrace_event) + (current_event.num_new_hot_frames * sizeof(uintptr_t));

	stack_logging_index_event current_index;
	if (type_flags & stack_logging_type_alloc) {
		current_index.address = STACK_LOGGING_DISGUISE(return_val);
		current_index.argument = size;
		if (logging_use_compaction) {
			last_logged_malloc_address = current_index.address; // disguised
			last_logged_backtrace_offset_diff = (uint32_t)this_offset_change;
			compaction_saved_differencing_history = *historical;
		}
	} else {
		current_index.address = STACK_LOGGING_DISGUISE(ptr_arg);
		current_index.argument = 0ul;
		last_logged_malloc_address = 0ul;
	}
	current_index.offset_and_flags = STACK_LOGGING_OFFSET_AND_FLAGS(total_offset, type_flags);

	// prepare for differencing next time
	historical->backtrace = (uintptr_t*)(frames + num_hot_to_skip);
	historical->backtrace_length = count;
	if (difference) historical->full_backtrace_countdown--;
	else historical->full_backtrace_countdown = STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY;
	historical->logging_index = current_logging_index;
	historical->logging_offset = total_offset;
	historical->thread = self_thread;
	historical->hash_pos = hashed_thread;

	// flush the data buffer to disk if necessary
	if (pre_write_buffers->next_free_stack_buffer_offset + this_offset_change >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
		flush_data();
	} else if (pre_write_buffers->next_free_index_buffer_offset + sizeof(stack_logging_index_event) >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
		flush_data();
	}

	// store bytes in buffers
	memcpy(pre_write_index_buffer+pre_write_buffers->next_free_index_buffer_offset, &current_index, sizeof(stack_logging_index_event));
	memcpy(pre_write_backtrace_event_buffer+pre_write_buffers->next_free_stack_buffer_offset, &current_event, sizeof(stack_logging_backtrace_event));
	memcpy(pre_write_backtrace_event_buffer+pre_write_buffers->next_free_stack_buffer_offset+sizeof(stack_logging_backtrace_event), frames+num_hot_to_skip, (size_t)this_offset_change - sizeof(stack_logging_backtrace_event));
	pre_write_buffers->next_free_index_buffer_offset += (uint32_t)sizeof(stack_logging_index_event);
	pre_write_buffers->next_free_stack_buffer_offset += (uint32_t)this_offset_change;
	total_offset += this_offset_change;

	OSSpinLockUnlock(&stack_logging_lock);
}

#pragma mark -
#pragma mark Remote Stack Log Access

#pragma mark - Design notes:

/*

this first one will look through the index, find the "stack_identifier" (i.e. the offset in the log file), and call the third function listed here.
extern kern_return_t __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *num_frames);
	// Gets the last allocation record about address

if !address, will load both index and stack logs and iterate through (expensive)
else will load just index, search for stack, and then use third function here to retrieve. (also expensive)
extern kern_return_t __mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context);
	// Applies enumerator to all records involving address sending context as enumerator's second parameter; if !address, applies enumerator to all records

this function will load the stack file, look for the stack, and follow up to STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY references to reconstruct.
extern kern_return_t __mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count);
	// Given a uniqued_stack fills stack_frames_buffer

*/
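
// A minimal caller sketch for the first prototype above (hypothetical: "task" and
// "block_address" are assumed to be a valid task port and a block address in that
// task; error handling elided):
//
//	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
//	uint32_t num_frames = 0;
//	if (__mach_stack_logging_get_frames(task, block_address, frames,
//			STACK_LOGGING_MAX_STACK_SIZE, &num_frames) == KERN_SUCCESS) {
//		// frames[0..num_frames-1] is the backtrace of the last allocation
//		// event recorded for block_address in the target task.
//	}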

#pragma mark - caching

static inline size_t hash_index_32(uint32_t address, size_t max_pos) __attribute__((always_inline));
static inline size_t hash_index_32(uint32_t address, size_t max_pos) {
//	return (((OSSwapInt32(address >> 2) << 3) & 0x96AAAA98) ^ (address >> 2)) % (max_pos-1);
	return (address >> 2) % (max_pos-1); // simplicity rules.
}

static inline size_t hash_index_64(uint64_t address, size_t max_pos) __attribute__((always_inline));
static inline size_t hash_index_64(uint64_t address, size_t max_pos) {
//	return (size_t)((((OSSwapInt64(address >> 3) << 2) & 0x54AA0A0AAA54ull) ^ (address >> 3)) % (max_pos - 1));
	return (size_t)((address >> 3) % (max_pos-1)); // simplicity rules.
}
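
// Both hashes drop the low 2 (32-bit) or 3 (64-bit) bits of the address before taking
// the modulus, presumably because malloc'd blocks are at least 4-/8-byte aligned and
// those bits carry no entropy; collisions are then resolved by the linear probing in
// the functions below.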

static void
transfer_node_ll32(remote_index_cache *cache, remote_index_node32 *old_node)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_32(old_node->address, cache->cache_node_capacity);
	do {
		if (cache->casted_table32[pos].address == old_node->address) { // hit like this shouldn't happen.
			fprintf(stderr, "impossible collision! two address==address lists! (transfer_node_ll32)\n");
			break;
		} else if (cache->casted_table32[pos].address == 0) { // empty
			cache->casted_table32[pos] = *old_node;
			break;
		} else {
			pos++;
			if (pos >= cache->cache_node_capacity) pos = 0;
		}
		collisions++;
	} while (collisions <= cache->collision_allowance);

	if (collisions > cache->collision_allowance) {
		fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node_ll32)\n", sizeof(void*)*8);
	}
}

static void
transfer_node_ll64(remote_index_cache *cache, remote_index_node64 *old_node)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_64(old_node->address, cache->cache_node_capacity);
	do {
		if (cache->casted_table64[pos].address == old_node->address) { // hit!
			fprintf(stderr, "impossible collision! two address==address lists! (transfer_node_ll64)\n");
			break;
		} else if (cache->casted_table64[pos].address == 0) { // empty
			cache->casted_table64[pos] = *old_node;
			break;
		} else {
			pos++;
			if (pos >= cache->cache_node_capacity) pos = 0;
		}
		collisions++;
	} while (collisions <= cache->collision_allowance);

	if (collisions > cache->collision_allowance) {
		fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node_ll64)\n", sizeof(void*)*8);
	}
}

static void
expand_cache(remote_index_cache *cache)
{
	// keep old stats
	size_t old_node_capacity = cache->cache_node_capacity;
	uint64_t old_node_count = cache->cache_node_count;
	uint64_t old_llnode_count = cache->cache_llnode_count;
	void *old_table = cache->table_memory;

	// double size
	cache->cache_size <<= 1;
	cache->cache_node_capacity <<= 1;
	cache->collision_allowance += STACK_LOGGING_REMOTE_CACHE_COLLISION_GROWTH_RATE;
	cache->table_memory = (void*)calloc(cache->cache_node_capacity, cache->in_use_node_size);
	if (cache->casted_table32) cache->casted_table32 = cache->table_memory;
	else cache->casted_table64 = cache->table_memory;

	// repopulate (expensive!)
	size_t i;
	if (cache->casted_table32) { // if target is 32-bit
		remote_index_node32 *casted_old_table = (remote_index_node32*)old_table;
		for (i = 0; i < old_node_capacity; i++) {
			if (casted_old_table[i].address) {
				transfer_node_ll32(cache, &casted_old_table[i]);
			}
		}
	} else {
		remote_index_node64 *casted_old_table = (remote_index_node64*)old_table;
		for (i = 0; i < old_node_capacity; i++) {
			if (casted_old_table[i].address) {
				transfer_node_ll64(cache, &casted_old_table[i]);
			}
		}
	}

	cache->cache_node_count = old_node_count;
	cache->cache_llnode_count = old_llnode_count;
	free(old_table);
//	printf("cache expanded to %0.2f mb (eff: %3.0f%%, capacity: %lu, nodes: %llu, llnodes: %llu)\n", ((float)(cache->cache_size))/(1 << 20), ((float)(cache->cache_node_count)*100.0)/((float)(cache->cache_node_capacity)), cache->cache_node_capacity, cache->cache_node_count, cache->cache_llnode_count);
}
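
// Growth policy: each expansion doubles cache_size/cache_node_capacity and also raises
// collision_allowance by STACK_LOGGING_REMOTE_CACHE_COLLISION_GROWTH_RATE, so the table
// tolerates progressively longer probe runs before paying for another expensive rehash.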

static void
insert_node32(remote_index_cache *cache, uint32_t address, uint64_t index_file_offset)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_32(address, cache->cache_node_capacity);

	if (cache->next_block_index >= STACK_LOGGING_REMOTE_LINKS_PER_BLOCK) {
		cache->next_block_index = 0;
		cache->current_block++;
		cache->blocks[cache->current_block] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
/*		printf("node buffer added. total nodes: %ul (%u buffers, %0.2f mb)\n", STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*(cache->current_block+1),
			cache->current_block+1, ((float)(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node)*(cache->current_block+1)))/(1 << 20));
*/
	}
	index_ll_node *new_node = &cache->blocks[cache->current_block][cache->next_block_index++];
	new_node->index_file_offset = index_file_offset;
	new_node->next = NULL;

	bool inserted = false;
	while (!inserted) {
		if (cache->casted_table32[pos].address == address) { // hit!
			cache->casted_table32[pos].last_link->next = new_node; // insert at end
			cache->casted_table32[pos].last_link = new_node;
			inserted = true;
			break;
		} else if (cache->casted_table32[pos].address == 0) { // empty
			cache->casted_table32[pos].address = address;
			cache->casted_table32[pos].linked_list = new_node;
			cache->casted_table32[pos].last_link = new_node;
			cache->cache_node_count++;
			inserted = true;
			break;
		} else {
			pos++;
			if (pos >= cache->cache_node_capacity) pos = 0;
		}
		collisions++;
		if (collisions > cache->collision_allowance) {
			expand_cache(cache);
			pos = hash_index_32(address, cache->cache_node_capacity);
			collisions = 0;
		}
	}

	cache->cache_llnode_count++;

}

static void
insert_node64(remote_index_cache *cache, uint64_t address, uint64_t index_file_offset)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_64(address, cache->cache_node_capacity);

	if (cache->next_block_index >= STACK_LOGGING_REMOTE_LINKS_PER_BLOCK) {
		cache->next_block_index = 0;
		cache->current_block++;
		cache->blocks[cache->current_block] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
	}
	index_ll_node *new_node = &cache->blocks[cache->current_block][cache->next_block_index++];
	new_node->index_file_offset = index_file_offset;
	new_node->next = NULL;

	bool inserted = false;
	while (!inserted) {
		if (cache->casted_table64[pos].address == address) { // hit!
			cache->casted_table64[pos].last_link->next = new_node; // insert at end
			cache->casted_table64[pos].last_link = new_node;
			inserted = true;
			break;
		} else if (cache->casted_table64[pos].address == 0) { // empty
			cache->casted_table64[pos].address = address;
			cache->casted_table64[pos].linked_list = new_node;
			cache->casted_table64[pos].last_link = new_node;
			inserted = true;
			break;
		} else {
			pos++;
			if (pos >= cache->cache_node_capacity) pos = 0;
		}
		collisions++;
		if (collisions > cache->collision_allowance) {
			expand_cache(cache);
			pos = hash_index_64(address, cache->cache_node_capacity);
			collisions = 0;
		}
	}

}

static void
update_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	remote_index_cache *cache = descriptors->cache;

	// create from scratch if necessary.
	if (!cache) {
		descriptors->cache = cache = (remote_index_cache*)calloc((size_t)1, sizeof(remote_index_cache));
		cache->cache_node_capacity = STACK_LOGGING_REMOTE_CACHE_DEFAULT_NODE_CAPACITY;
		cache->collision_allowance = STACK_LOGGING_REMOTE_CACHE_DEFAULT_COLLISION_ALLOWANCE;
		cache->cache_node_count = cache->cache_llnode_count = 0;
		cache->last_index_file_offset = 0;
		cache->next_block_index = 0;
		cache->current_block = 0;
		cache->blocks[0] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
		cache->in_use_node_size = (descriptors->task_is_64_bit ? sizeof(remote_index_node64) : sizeof(remote_index_node32));
		cache->cache_size = cache->cache_node_capacity*cache->in_use_node_size;
		cache->table_memory = (void*)calloc(cache->cache_node_capacity, cache->in_use_node_size);
		if (descriptors->task_is_64_bit) cache->casted_table64 = (remote_index_node64*)(cache->table_memory);
		else cache->casted_table32 = (remote_index_node32*)(cache->table_memory);

		// now map in the shared memory, if possible
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_logging_directory_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, descriptors->remote_pid);

		int shmid = shm_open(shmem_name_string, O_RDWR, S_IRUSR | S_IWUSR);
		if (shmid >= 0) {
			cache->shmem = mmap(0, sizeof(stack_buffer_shared_memory), PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
			if (cache->shmem == MAP_FAILED) cache->shmem = NULL; // mmap() reports failure as MAP_FAILED, not NULL
			close(shmid);
		}

		if (shmid < 0 || cache->shmem == NULL) {
			// failed to connect to the shared memory region; warn and continue.
			_malloc_printf(ASL_LEVEL_INFO, "warning: unable to connect to remote process' shared memory; allocation histories may not be up-to-date.\n");
		}
	}

	// suspend and see how much updating there is to do. there are three scenarios, listed below
	bool update_snapshot = false;
	if (descriptors->remote_task != mach_task_self()) {
		task_suspend(descriptors->remote_task);
	}

	struct stat file_statistics;
	fstat(fileno(descriptors->index_file_stream), &file_statistics);
	size_t read_size = (descriptors->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	uint64_t read_this_update = 0;

	// computing delta_indecies is subtle; there are three cases:
	// 1. there is no shared memory (or we can't connect); diff the last_index_file_offset from the filesize.
	// 2. the only updates have been in shared memory; the disk file didn't change at all. delta_indecies should be zero, scan snapshot only.
	// 3. the updates have flushed to disk, meaning that most likely there is new data on disk that wasn't read from shared memory.
	//    correct delta_indecies for the pre-scanned amount and read the new data from disk and shmem.
	uint64_t delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
	uint32_t last_snapshot_scan_index = 0;
	if (delta_indecies && cache->shmem) {
		// case 3: add cache scanned to known from disk and recalc
		cache->last_index_file_offset += cache->snapshot.next_free_index_buffer_offset;
		delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
		update_snapshot = true;
	} else if (cache->shmem) {
		// case 2: set the last snapshot scan count so we don't rescan something we've seen.
		last_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
	}

	// no update necessary for the file; check if we need a snapshot.
	if (delta_indecies == 0) {
		if (cache->shmem && !update_snapshot) {
			update_snapshot = (cache->shmem->next_free_index_buffer_offset != cache->snapshot.next_free_index_buffer_offset);
		}
	}

	// if a snapshot is necessary, memcpy from remote frozen process' memory
1038