/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <pthread.h>
#include <errno.h>
#include "stack_logging.h"
#include "malloc_printf.h"
#include "_simple.h"	// as included by malloc.c, this defines ASL_LEVEL_INFO

#pragma mark -
#pragma mark Defines

#ifdef TEST_DISK_STACK_LOGGING
#define _malloc_printf fprintf
#undef ASL_LEVEL_INFO
#define ASL_LEVEL_INFO stderr
#endif

#define STACK_LOGGING_THREAD_HASH_SIZE 2048 // must be an even power of two
#define STACK_LOGGING_MAX_STACK_SIZE 512
#define STACK_LOGGING_BLOCK_WRITING_SIZE 8192
#define STACK_LOGGING_NUMBER_RECENT_BACKTRACES 50
#define STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY 100
#define STACK_LOGGING_MAX_THREAD_COLLISIONS 3
#define STACK_LOGGING_MIN_SAME_FRAMES 3
#define STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED 3
#define STACK_LOGGING_REMOTE_CACHE_DEFAULT_COLLISION_ALLOWANCE 5
#define STACK_LOGGING_REMOTE_CACHE_DEFAULT_NODE_CAPACITY (1 << 14) // <2mb for 32->32, ~3mb for (32->64 || 64->32), ~4mb for 64->64
#define STACK_LOGGING_REMOTE_CACHE_COLLISION_GROWTH_RATE 3
#define STACK_LOGGING_REMOTE_LINKS_PER_BLOCK (1 << 20) // this caps the number of malloc/frees that can be read in at 2^30
			// (1024 blocks of 2^20 links each); it means that if the .index file is >24gb, remote access will start to fail.
			// note: at that point, the .stack file will probably be ~56gb on top of that and
			// it'll also be using around 20gb of memory in the analyzing process...
			// all of these are 64-bit stats; the 32-bit analyzing process limits are lower.
			// in short, if you want to analyze a process making > 1 billion malloc/frees
			// (after compaction), bump this number slightly.

#pragma mark -
#pragma mark Macros

#define STACK_LOGGING_FLAGS(longlongvar) (uint8_t)((uint64_t)(longlongvar) >> 56)
#define STACK_LOGGING_OFFSET(longlongvar) ((longlongvar) & 0x00FFFFFFFFFFFFFFull)
#define STACK_LOGGING_OFFSET_AND_FLAGS(longlongvar, realshortvar) (((uint64_t)(longlongvar) & 0x00FFFFFFFFFFFFFFull) | ((uint64_t)(realshortvar) << 56))
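
#ifdef TEST_DISK_STACK_LOGGING
/* Illustrative sketch (not part of the original file; compiled only in test
 * builds): the macros above pack a 56-bit file offset and an 8-bit flags
 * value into a single uint64_t, and unpack them losslessly.
 */
static void
example_offset_and_flags_roundtrip(void)
{
	uint64_t offset = 0x123456789abcull;	// any value that fits in 56 bits
	uint8_t flags = 0x02;			// e.g. an allocation-type flag
	uint64_t packed = STACK_LOGGING_OFFSET_AND_FLAGS(offset, flags);

	// the top byte holds the flags; the low 56 bits hold the offset
	if (STACK_LOGGING_FLAGS(packed) != flags || STACK_LOGGING_OFFSET(packed) != offset)
		fprintf(stderr, "offset/flags packing round-trip failed\n");
}
#endif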

#pragma mark -
#pragma mark Types

#pragma mark - stack_logging_backtrace_event
typedef struct {
	int16_t		offset_delta; // may want to expand this one; should always be < 0.
	uint16_t	num_identical_frames;
	uint16_t	num_new_hot_frames; // count of backtrace[]
} stack_logging_backtrace_event;
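
// Illustrative layout note (an inference from the writer below, not in the
// original): each record in the .stacks file is one stack_logging_backtrace_event
// immediately followed by num_new_hot_frames uintptr_t frames; offset_delta
// points backwards at the earlier record whose coldest num_identical_frames
// frames this record reuses.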

#pragma mark - stack_logging_index_event
typedef struct {
	uintptr_t	argument;
	uintptr_t	address;
	uint64_t	offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event;

#pragma mark - stack_logging_index_event32
typedef struct {
	uint32_t	argument;
	uint32_t	address;
	uint64_t	offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event32;

#pragma mark - stack_logging_index_event64
typedef struct {
	uint64_t	argument;
	uint64_t	address;
	uint64_t	offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event64;

#pragma mark - thread_backtrace_history
// for management of previous backtraces (by thread):
typedef struct {
	vm_address_t	thread;
	uint32_t	hash_pos;
	uint64_t	logging_index;
	int64_t		logging_offset;
	uint32_t	full_backtrace_countdown;
	uint32_t	backtrace_length;
	uintptr_t	*backtrace;
} thread_backtrace_history;

#pragma mark - stack_buffer_shared_memory
// for storing/looking up allocations that haven't yet been written to disk; consistent size across 32/64-bit processes.
// It's important that these fields don't change alignment due to the architecture because they may be accessed from an
// analyzing process with a different arch - hence the pragmas.
#pragma pack(push,4)
typedef struct {
	uint64_t	start_index_offset;
	uint64_t	start_stack_offset;
	uint32_t	next_free_index_buffer_offset;
	uint32_t	next_free_stack_buffer_offset;
	char		index_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
	char		stack_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
} stack_buffer_shared_memory;
#pragma pack(pop)
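
#ifdef TEST_DISK_STACK_LOGGING
// Illustrative compile-time check (not in the original): with 4-byte packing,
// the shared-memory header has the same size no matter which architecture
// compiled it, so a 32-bit analyzer can read a 64-bit target's region.
typedef char _stack_buffer_shared_memory_layout_check[
	(sizeof(stack_buffer_shared_memory) ==
	 2*sizeof(uint64_t) + 2*sizeof(uint32_t) + 2*STACK_LOGGING_BLOCK_WRITING_SIZE) ? 1 : -1];
#endif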

#pragma mark - index_ll_node
// linked-list node in table for allocations of a single address
typedef struct index_ll_node {
	struct index_ll_node	*next;
	uint64_t		index_file_offset;
} index_ll_node;

#pragma mark - remote_index_node32
// 32-bit target process address slot in table
typedef struct {
	uint32_t	address;
	index_ll_node	*linked_list;
	index_ll_node	*last_link;
} remote_index_node32;

#pragma mark - remote_index_node64
// 64-bit target process variant
typedef struct {
	uint64_t	address;
	index_ll_node	*linked_list;
	index_ll_node	*last_link;
} remote_index_node64;

#pragma mark - remote_index_cache
// for caching index information client-side:
typedef struct {
	size_t		cache_size;
	size_t		cache_node_capacity;
	uint32_t	collision_allowance;
	uint64_t	cache_node_count; // Debug only.
	uint64_t	cache_llnode_count; // Debug only.
	size_t		in_use_node_size; // sizeof(remote_index_node32) || sizeof(remote_index_node64)
	void		*table_memory; // this can be malloced; it's on the client side.
	remote_index_node32	*casted_table32; // represents table memory as 32-bit.
	remote_index_node64	*casted_table64; // ditto, 64-bit
	stack_buffer_shared_memory	*shmem; // shared memory
	stack_buffer_shared_memory	snapshot; // memory snapshot of the remote process' shared memory
	uint32_t	last_pre_written_index_size;
	uint64_t	last_index_file_offset;
	index_ll_node	*blocks[1024];
	uint32_t	current_block;
	uint32_t	next_block_index;
} remote_index_cache;

#pragma mark - remote_task_file_streams
// for reading stack history information from remote processes:
typedef struct {
	task_t		remote_task;
	pid_t		remote_pid;
	int32_t		task_is_64_bit;
	int32_t		in_use_count;
	FILE		*index_file_stream;
	FILE		*stack_file_stream;
	remote_index_cache	*cache;
} remote_task_file_streams;

#pragma mark -
#pragma mark Constants

static stack_buffer_shared_memory *pre_write_buffers;
static char *pre_write_backtrace_event_buffer = NULL;
static char *pre_write_index_buffer = NULL;

static OSSpinLock stack_logging_lock = OS_SPINLOCK_INIT;
static uint64_t current_logging_index = 0;
static int64_t total_offset = 0;

// single-thread access variables
static vm_address_t stack_buffer[STACK_LOGGING_NUMBER_RECENT_BACKTRACES][STACK_LOGGING_MAX_STACK_SIZE];
static thread_backtrace_history thread_buffer[STACK_LOGGING_THREAD_HASH_SIZE];
static int32_t current_stack_buffer = 0;
static uintptr_t last_logged_malloc_address = 0;
static uint32_t last_logged_backtrace_offset_diff = 0;
static thread_backtrace_history compaction_saved_differencing_history;

// Constants to define stack logging directory and path names.
// Files will get written to /tmp/stack-logs.<pid>.<progname>.XXXXXX/stack-logs.{index,stacks}
// The directory is securely created with mkdtemp() and the files inside it just have static names for simplicity.
static const char *temporary_directory = "/tmp";
static const char *stack_logging_directory_base_name = "stack-logs.";
static const char *index_file_name = "stack-logs.index";
static const char *stack_file_name = "stack-logs.stacks";

static char stack_logs_directory[PATH_MAX];
static char index_file_path[PATH_MAX];
static char stack_file_path[PATH_MAX];
static int index_file_descriptor = -1;
static int stack_file_descriptor = -1;

// for accessing remote log files
static remote_task_file_streams remote_fds[STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED];
static uint32_t next_remote_task_fd = 0;
static uint32_t remote_task_fd_count = 0;
static OSSpinLock remote_fd_list_lock = OS_SPINLOCK_INIT;

// activation variables

static int logging_use_compaction = 1; // set this to zero to always disable compaction.
// We set malloc_logger to NULL to disable logging if we encounter errors
// during file writing.
typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
extern malloc_logger_t *malloc_logger;
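
#ifdef TEST_DISK_STACK_LOGGING
/* Illustrative sketch (an assumption about the caller, not part of this
 * file): the malloc layer invokes the hook roughly like this for a
 * successful allocation when logging is enabled.
 */
static void
example_invoke_malloc_logger(uintptr_t zone, size_t size, void *result)
{
	if (malloc_logger)
		malloc_logger(stack_logging_type_alloc, zone, (uintptr_t)size,
				(uintptr_t)0, (uintptr_t)result, 0);
}
#endif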

#pragma mark -
#pragma mark Disk Stack Logging

static void delete_log_files(void);	// pre-declare

static void
append_int(char * filename, pid_t pid)
{
	unsigned int value;
	size_t len;
	unsigned int i;
	unsigned int count;

	len = strlen(filename);

	count = 0;
	value = pid;
	while (value > 0) {
		value /= 10;
		count++;
	}

	filename[len + count] = 0;

	value = pid;
	for (i = 0; i < count; i++) {
		filename[len + count - 1 - i] = '0' + value % 10;
		value /= 10;
	}
}
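
#ifdef TEST_DISK_STACK_LOGGING
/* Illustrative usage (not part of the original file): append_int() writes the
 * decimal digits of a pid in place, which avoids snprintf() since snprintf()
 * can itself call malloc().
 */
static void
example_append_int(void)
{
	char name[PATH_MAX] = "stack-logs.";
	append_int(name, (pid_t)1234); // name is now "stack-logs.1234"
	fprintf(stderr, "%s\n", name);
}
#endif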

// If successful, returns path to directory that was created. Otherwise returns NULL.
static char *
create_log_files(void)
{
	pid_t pid = getpid();
	const char *progname = getprogname();
	char path_name[PATH_MAX];
	char *created_directory = NULL;

	// WARNING! use of snprintf can induce malloc() calls
	strlcpy(stack_logs_directory, temporary_directory, PATH_MAX);
	strlcat(stack_logs_directory, "/", PATH_MAX);
	strlcat(stack_logs_directory, stack_logging_directory_base_name, PATH_MAX);
	append_int(stack_logs_directory, pid);
	if (progname && progname[0] != '\0') {
		strlcat(stack_logs_directory, ".", PATH_MAX);
		strlcat(stack_logs_directory, progname, PATH_MAX);
	}
	strlcat(stack_logs_directory, ".XXXXXX", PATH_MAX);

	// Securely make temporary directory for the log files, then create the files.
	if (mkdtemp(stack_logs_directory) == stack_logs_directory) {
		strlcpy(path_name, stack_logs_directory, PATH_MAX);
		strlcat(path_name, "/", PATH_MAX);
		strlcat(path_name, index_file_name, PATH_MAX);
		strlcpy(index_file_path, path_name, PATH_MAX);
		index_file_descriptor = open(path_name, O_WRONLY | O_TRUNC | O_CREAT, 0600);

		strlcpy(path_name, stack_logs_directory, PATH_MAX);
		strlcat(path_name, "/", PATH_MAX);
		strlcat(path_name, stack_file_name, PATH_MAX);
		strlcpy(stack_file_path, path_name, PATH_MAX);
		stack_file_descriptor = open(path_name, O_WRONLY | O_TRUNC | O_CREAT, 0600);

		if (index_file_descriptor == -1 || stack_file_descriptor == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "unable to create stack log files in directory %s\n", stack_logs_directory);
			delete_log_files();
			created_directory = NULL;
		} else {
			_malloc_printf(ASL_LEVEL_INFO, "stack logs being written into %s\n", stack_logs_directory);
			created_directory = stack_logs_directory;
		}
	} else {
		_malloc_printf(ASL_LEVEL_INFO, "unable to create stack log directory %s\n", stack_logs_directory);
		created_directory = NULL;
	}
	return created_directory;
}

// This function may be called from the target process when exiting, or from either the target process or
// a stack log analysis process when reaping orphaned stack log files.
// Returns -1 if the files exist and couldn't be removed, returns 0 otherwise.
static int
delete_log_files_in_directory(char *logdir)
{
	char path_name[PATH_MAX];
	int unlink_count = 0;
	int failure_count = 0;
	struct stat statbuf;

	if (logdir == NULL || logdir[0] == '\0') return 0;

	strlcpy(path_name, logdir, PATH_MAX);
	strlcat(path_name, "/", PATH_MAX);
	strlcat(path_name, index_file_name, PATH_MAX);
	if (unlink(path_name) == 0) {
		unlink_count++;
	} else if (stat(path_name, &statbuf) == 0) {
		failure_count++;
	}

	strlcpy(path_name, logdir, PATH_MAX);
	strlcat(path_name, "/", PATH_MAX);
	strlcat(path_name, stack_file_name, PATH_MAX);
	if (unlink(path_name) == 0) {
		unlink_count++;
	} else if (stat(path_name, &statbuf) == 0) {
		failure_count++;
	}

	if (rmdir(logdir) == -1) failure_count++;

	return (failure_count > 0) ? -1 : 0;
}

// This function will be called from atexit() in the target process.
static void
delete_log_files(void)
{
	if (stack_logs_directory[0] == '\0') return; // stack_logs_directory is an array, so only the empty check is meaningful

	if (delete_log_files_in_directory(stack_logs_directory) == 0) {
		_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", stack_logs_directory);
		stack_file_path[0] = '\0';
		index_file_path[0] = '\0';
	} else {
		_malloc_printf(ASL_LEVEL_INFO, "unable to delete stack logs from %s\n", stack_logs_directory);
	}
}

static bool
is_process_running(pid_t pid)
{
	struct kinfo_proc kpt[1];
	size_t size = sizeof(struct kinfo_proc);
	int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};

	sysctl(mib, 4, kpt, &size, NULL, 0); // size is set to the bytes of one entry, or to zero if no process has this pid

	return (size == sizeof(struct kinfo_proc));
}

// The log files can be quite large and aren't too useful after the process that created them no longer exists.
// Normally they should get removed when the process exits, but if the process crashed the log files might remain.
// So, reap any stack log files for processes that no longer exist.
//
// If the remove_for_this_pid flag is set, then any log files that already exist for the current process will also be deleted.
// Those log files are probably the result of this process having been exec'ed from another one (without a fork()).
// The remove_for_this_pid flag is only set for a target process (one just starting logging); a stack logging "client"
// process reaps log files too, but if we're using stack logging on the client process itself, then we don't want to remove
// its own log files.
static void
reap_orphaned_log_files(bool remove_for_this_pid)
{
	DIR *dp;
	struct dirent *entry;
	int prefix_length;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];
	pid_t current_pid = getpid();

	if ((dp = opendir(temporary_directory)) == NULL) {
		return;
	}

	strlcpy(prefix_name, stack_logging_directory_base_name, PATH_MAX);
	prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( entry->d_type == DT_DIR && ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) ) {
			pid_t pid = (pid_t)strtol(&entry->d_name[prefix_length], (char **)NULL, 10);
			if ( (! is_process_running(pid)) || (remove_for_this_pid && pid == current_pid) ) {
				strlcpy(pathname, temporary_directory, PATH_MAX);
				strlcat(pathname, "/", PATH_MAX);
				strlcat(pathname, entry->d_name, PATH_MAX);
				if (delete_log_files_in_directory(pathname) == 0) {
					if (remove_for_this_pid && pid == current_pid) {
						_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", pathname);
					} else {
						_malloc_printf(ASL_LEVEL_INFO, "process %d no longer exists, stack logs deleted from %s\n", pid, pathname);
					}
				}
			}
		}
	}
	closedir(dp);
}

/*
 * Since there are many errors that could cause stack logging to get disabled, this is a convenience method
 * for disabling any future logging in this process and for informing the user.
 */
static void
disable_stack_logging(void)
{
	_malloc_printf(ASL_LEVEL_INFO, "stack logging disabled due to previous errors.\n");
	stack_logging_enable_logging = 0;
	malloc_logger = NULL;
}

/* A wrapper around write() that will try to reopen the index/stack file and
 * write to it if someone closed it underneath us (e.g. the process we just
 * started decided to close all file descriptors except stdin/stdout/stderr). Some
 * programs like to do that and calling abort() on them is rude.
 */
static ssize_t
robust_write(int fd, const void *buf, size_t nbyte) {
	ssize_t written = write(fd, buf, nbyte);
	if (written == -1 && errno == EBADF) {
		char *file_to_reopen = NULL;
		int *fd_to_reset = NULL;

		// descriptor was closed on us. We need to reopen it
		if (fd == index_file_descriptor) {
			file_to_reopen = index_file_path;
			fd_to_reset = &index_file_descriptor;
		}
		else if (fd == stack_file_descriptor) {
			file_to_reopen = stack_file_path;
			fd_to_reset = &stack_file_descriptor;
		} else {
			// We don't know about this file. Return (and abort()).
			_malloc_printf(ASL_LEVEL_INFO, "Unknown file descriptor (it's neither the index file, nor the stacks file)\n");
			return -1;
		}

		// The file *should* already exist. If not, fail.
		fd = open(file_to_reopen, O_WRONLY | O_APPEND);
		if (fd < 3) {
			// If we somehow got stdin/out/err, we need to relinquish them and
			// get another fd.
			int fds_to_close[3] = { 0 };
			while (fd < 3) {
				if (fd == -1) {
					_malloc_printf(ASL_LEVEL_INFO, "unable to re-open stack log file %s\n", file_to_reopen);
					delete_log_files();
					return -1;
				}
				fds_to_close[fd] = 1;
				fd = dup(fd);
			}

			// We have an fd we like. Close the ones we opened.
			if (fds_to_close[0]) close(0);
			if (fds_to_close[1]) close(1);
			if (fds_to_close[2]) close(2);
		}

		*fd_to_reset = fd;
		written = write(fd, buf, nbyte);
	}
	return written;
}

static void
flush_data(void)
{
	ssize_t written; // signed size_t
	size_t remaining;
	char * p;

	if (index_file_descriptor == -1) {
		if (create_log_files() == NULL) {
			return;
		}
	}

	// Write the events before the index so that hopefully the events will be on disk if the index refers to them.
	p = pre_write_backtrace_event_buffer;
	remaining = (size_t)pre_write_buffers->next_free_stack_buffer_offset;
	while (remaining > 0) {
		written = robust_write(stack_file_descriptor, p, remaining);
		if (written == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n", stack_file_path, strerror(errno));
			disable_stack_logging();
			return;
		}
		p += written;
		remaining -= written;
	}
	p = pre_write_index_buffer;
	remaining = (size_t)pre_write_buffers->next_free_index_buffer_offset;
	while (remaining > 0) {
		written = robust_write(index_file_descriptor, p, remaining);
		if (written == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n", index_file_path, strerror(errno));
			disable_stack_logging();
			return;
		}
		p += written;
		remaining -= written;
	}

	pre_write_buffers->start_stack_offset += pre_write_buffers->next_free_stack_buffer_offset;
	pre_write_buffers->start_index_offset += pre_write_buffers->next_free_index_buffer_offset;
	pre_write_buffers->next_free_index_buffer_offset = pre_write_buffers->next_free_stack_buffer_offset = 0;
}

static void
prepare_to_log_stacks(void)
{
	if (!pre_write_buffers) {
		last_logged_malloc_address = 0ul;
		logging_use_compaction = (stack_logging_dontcompact ? 0 : logging_use_compaction);

		// Create a shared memory region to hold the pre-write index and stack buffers. This will allow remote analysis processes to access
		// these buffers to get logs for even the most recent allocations. The remote process will need to pause this process to assure that
		// the contents of these buffers don't change while being inspected.
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_logging_directory_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, getpid());

		int shmid = shm_open(shmem_name_string, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
		if (shmid < 0) {
			// Failed to create shared memory region; turn off stack logging.
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		size_t full_shared_mem_size = sizeof(stack_buffer_shared_memory);
		ftruncate(shmid, (off_t)full_shared_mem_size);
		pre_write_buffers = (stack_buffer_shared_memory*)mmap(0, full_shared_mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
		close(shmid);

		if (pre_write_buffers == MAP_FAILED) {
			pre_write_buffers = NULL; // mmap() returns MAP_FAILED rather than NULL on error; reset our sentinel
			_malloc_printf(ASL_LEVEL_INFO, "error mapping in shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		// Store and use the buffer offsets in shared memory so that they can be accessed remotely
		pre_write_buffers->start_index_offset = pre_write_buffers->start_stack_offset = 0ull;
		pre_write_buffers->next_free_index_buffer_offset = pre_write_buffers->next_free_stack_buffer_offset = 0;
		pre_write_backtrace_event_buffer = pre_write_buffers->stack_buffer;
		pre_write_index_buffer = pre_write_buffers->index_buffer;

		// malloc() can be called by the following, so these need to be done outside the stack_logging_lock but after the buffers have been set up.
		atexit(delete_log_files);	// atexit() can call malloc()
		reap_orphaned_log_files(true);	// this calls opendir() which calls malloc()

		// this call to flush data ensures that the log files (while possibly empty) exist; analyzing processes will rely on this assumption.
		flush_data();
	}
}

void
__disk_stack_logging_log_stack(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
{
	if (!stack_logging_enable_logging) return;

	// check incoming data
	if (type_flags & stack_logging_type_alloc && type_flags & stack_logging_type_dealloc) {
		uintptr_t swapper = size;
		size = ptr_arg;
		ptr_arg = swapper;
		if (ptr_arg == return_val) return; // realloc had no effect, skipping

		if (ptr_arg == 0) { // realloc(NULL, size) same as malloc(size)
			type_flags ^= stack_logging_type_dealloc;
		} else {
			// realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
			__disk_stack_logging_log_stack(stack_logging_type_dealloc, zone_ptr, ptr_arg, (uintptr_t)0, (uintptr_t)0, num_hot_to_skip + 1);
			__disk_stack_logging_log_stack(stack_logging_type_alloc, zone_ptr, size, (uintptr_t)0, return_val, num_hot_to_skip + 1);
			return;
		}
	}
	if (type_flags & stack_logging_type_dealloc) {
		if (size) {
			ptr_arg = size;
			size = 0;
		} else return; // free(nil)
	}
	if (type_flags & stack_logging_type_alloc && return_val == 0) return; // alloc that failed

	type_flags &= 0x7;

	// now actually begin
	prepare_to_log_stacks();

	// since there could have been a fatal (to stack logging) error such as the log files not being created, check this variable before continuing
	if (!stack_logging_enable_logging) return;
	vm_address_t self_thread = (vm_address_t)pthread_self(); // use pthread_self() rather than mach_thread_self() to avoid a system call

	// lock and enter
	OSSpinLockLock(&stack_logging_lock);

	// compaction
	if (last_logged_malloc_address && (type_flags & stack_logging_type_dealloc) && STACK_LOGGING_DISGUISE(ptr_arg) == last_logged_malloc_address) {
		// *waves hand* the last allocation never occurred
		pre_write_buffers->next_free_index_buffer_offset -= (uint32_t)sizeof(stack_logging_index_event);
		pre_write_buffers->next_free_stack_buffer_offset -= last_logged_backtrace_offset_diff;
		total_offset -= (int64_t)last_logged_backtrace_offset_diff;
		last_logged_malloc_address = 0ul;

		// not going to subtract from the current_stack_buffer or current_logging_index indices;
		// there is no intention to restore the previously held stack. the differencing history
		// must be reset to its previous value, though.
		thread_buffer[compaction_saved_differencing_history.hash_pos] = compaction_saved_differencing_history;

		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// locate previous backtrace for this thread
	short difference = 1;

	uint32_t collisions = STACK_LOGGING_MAX_THREAD_COLLISIONS;
	uint32_t hashed_thread = self_thread & (STACK_LOGGING_THREAD_HASH_SIZE-1);
	while (thread_buffer[hashed_thread].thread && thread_buffer[hashed_thread].thread != self_thread) {
		if (--collisions == 0) {
			difference = 0;
			break;
		}
		hashed_thread = (hashed_thread + 1) & (STACK_LOGGING_THREAD_HASH_SIZE-1); // wrap so probing can't run past the end of thread_buffer
	}

	// gather stack
	uint32_t count;
	thread_stack_pcs(stack_buffer[current_stack_buffer], STACK_LOGGING_MAX_STACK_SIZE, &count);
	stack_buffer[current_stack_buffer][count++] = self_thread + 1; // stuffing thread # in the coldest slot. Add 1 to match what the old stack logging did.
	num_hot_to_skip += 2;
	if (count <= num_hot_to_skip) {
		// Oops! Didn't get a valid backtrace from thread_stack_pcs().
		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// easy access variables
	thread_backtrace_history *historical = &thread_buffer[hashed_thread];
	vm_address_t *frames = stack_buffer[current_stack_buffer];

	// increment as necessary
	current_logging_index++;
	current_stack_buffer++;
	if (current_stack_buffer == STACK_LOGGING_NUMBER_RECENT_BACKTRACES) current_stack_buffer = 0;

	// difference (if possible)
	if (historical->logging_index + STACK_LOGGING_NUMBER_RECENT_BACKTRACES <= current_logging_index) difference = 0;
	else if (historical->full_backtrace_countdown == 0) difference = 0;

	uint32_t sameness = 0;
	if (difference) {
		uint32_t old_count = historical->backtrace_length;
		int32_t new_count = (int32_t)count;
		while (old_count-- && new_count-- > (int32_t)num_hot_to_skip) {
			if (historical->backtrace[old_count] == frames[new_count]) sameness++;
			else break;
		}

		if (sameness < STACK_LOGGING_MIN_SAME_FRAMES) { // failure; pretend nothing was the same
			difference = 0;
		}
	}

	// create events for byte storage
	count -= num_hot_to_skip;
	stack_logging_backtrace_event current_event;
	current_event.num_identical_frames = (difference ? sameness : 0);
	current_event.num_new_hot_frames = (difference ? count - sameness : count);
	current_event.offset_delta = (difference ? historical->logging_offset - total_offset : 0);
	int64_t this_offset_change = sizeof(stack_logging_backtrace_event) + (current_event.num_new_hot_frames * sizeof(uintptr_t));

	stack_logging_index_event current_index;
	if (type_flags & stack_logging_type_alloc) {
		current_index.address = STACK_LOGGING_DISGUISE(return_val);
		current_index.argument = size;
		if (logging_use_compaction) {
			last_logged_malloc_address = current_index.address; // disguised
			last_logged_backtrace_offset_diff = (uint32_t)this_offset_change;
			compaction_saved_differencing_history = *historical;
		}
	} else {
		current_index.address = STACK_LOGGING_DISGUISE(ptr_arg);
		current_index.argument = 0ul;
		last_logged_malloc_address = 0ul;
	}
	current_index.offset_and_flags = STACK_LOGGING_OFFSET_AND_FLAGS(total_offset, type_flags);

	// prepare for differencing next time
	historical->backtrace = (uintptr_t*)(frames + num_hot_to_skip);
	historical->backtrace_length = count;
	if (difference) historical->full_backtrace_countdown--;
	else historical->full_backtrace_countdown = STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY;
	historical->logging_index = current_logging_index;
	historical->logging_offset = total_offset;
	historical->thread = self_thread;
	historical->hash_pos = hashed_thread;

	// flush the data buffer to disk if necessary
	if (pre_write_buffers->next_free_stack_buffer_offset + this_offset_change >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
		flush_data();
	} else if (pre_write_buffers->next_free_index_buffer_offset + sizeof(stack_logging_index_event) >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
		flush_data();
	}

	// store bytes in buffers
	memcpy(pre_write_index_buffer+pre_write_buffers->next_free_index_buffer_offset, &current_index, sizeof(stack_logging_index_event));
	memcpy(pre_write_backtrace_event_buffer+pre_write_buffers->next_free_stack_buffer_offset, &current_event, sizeof(stack_logging_backtrace_event));
	memcpy(pre_write_backtrace_event_buffer+pre_write_buffers->next_free_stack_buffer_offset+sizeof(stack_logging_backtrace_event), frames+num_hot_to_skip, (size_t)this_offset_change - sizeof(stack_logging_backtrace_event));
	pre_write_buffers->next_free_index_buffer_offset += (uint32_t)sizeof(stack_logging_index_event);
	pre_write_buffers->next_free_stack_buffer_offset += (uint32_t)this_offset_change;
	total_offset += this_offset_change;

	OSSpinLockUnlock(&stack_logging_lock);
}

#pragma mark -
#pragma mark Remote Stack Log Access

#pragma mark - Design notes:

/*

__mach_stack_logging_get_frames() looks through the index, finds the "stack_identifier" (i.e., the offset in the log file), and calls __mach_stack_logging_frames_for_uniqued_stack() to retrieve the frames.
extern kern_return_t __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *num_frames);
    // Gets the last allocation record about address

If !address, __mach_stack_logging_enumerate_records() loads both the index and stack logs and iterates through them (expensive);
otherwise it loads just the index, searches for the stack, and then uses __mach_stack_logging_frames_for_uniqued_stack() to retrieve it (also expensive).
extern kern_return_t __mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context);
    // Applies enumerator to all records involving address, passing context as enumerator's second parameter; if !address, applies enumerator to all records

__mach_stack_logging_frames_for_uniqued_stack() loads the stack file, looks for the stack, and follows up to STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY references to reconstruct it.
extern kern_return_t __mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count);
    // Given a uniqued_stack, fills stack_frames_buffer

*/
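
#ifdef TEST_DISK_STACK_LOGGING
/* Illustrative client sketch (hypothetical, not part of this file): how an
 * analysis tool might use the enumeration entry point described above to
 * count every allocation event a target task has logged. The helper names
 * are invented.
 */
static void
example_count_allocations(mach_stack_logging_record_t record, void *context)
{
	if (record.type_flags & stack_logging_type_alloc)
		(*(uint64_t *)context)++;
}

static kern_return_t
example_enumerate_target(task_t target_task)
{
	uint64_t allocation_count = 0;
	// an address of 0 means "apply the enumerator to every record"
	kern_return_t err = __mach_stack_logging_enumerate_records(target_task,
			(mach_vm_address_t)0, example_count_allocations, &allocation_count);
	if (err == KERN_SUCCESS)
		fprintf(stderr, "target logged %llu allocation events\n", allocation_count);
	return err;
}
#endif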

#pragma mark - caching

static inline size_t hash_index_32(uint32_t address, size_t max_pos) __attribute__((always_inline));
static inline size_t hash_index_32(uint32_t address, size_t max_pos) {
//	return (((OSSwapInt32(address >> 2) << 3) & 0x96AAAA98) ^ (address >> 2)) % (max_pos-1);
	return (address >> 2) % (max_pos-1); // simplicity rules.
}

static inline size_t hash_index_64(uint64_t address, size_t max_pos) __attribute__((always_inline));
static inline size_t hash_index_64(uint64_t address, size_t max_pos) {
//	return (size_t)((((OSSwapInt64(address >> 3) << 2) & 0x54AA0A0AAA54ull) ^ (address >> 3)) % (max_pos - 1));
	return (size_t)((address >> 3) % (max_pos-1)); // simplicity rules.
}
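
#ifdef TEST_DISK_STACK_LOGGING
/* Illustrative sketch (not part of the original file): malloc'd blocks are at
 * least 8-byte aligned (16 on 64-bit), so the low address bits carry no
 * entropy; shifting them away before the modulo spreads entries evenly.
 */
static void
example_hash_slots(void)
{
	size_t capacity = STACK_LOGGING_REMOTE_CACHE_DEFAULT_NODE_CAPACITY;
	// two adjacent 16-byte-aligned allocations hash to distinct nearby slots
	fprintf(stderr, "slots: %lu and %lu\n",
			(unsigned long)hash_index_64(0x100100000ull, capacity),
			(unsigned long)hash_index_64(0x100100010ull, capacity));
}
#endif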

static void
transfer_node_ll32(remote_index_cache *cache, remote_index_node32 *old_node)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_32(old_node->address, cache->cache_node_capacity);
	do {
		if (cache->casted_table32[pos].address == old_node->address) { // a hit like this shouldn't happen.
			fprintf(stderr, "impossible collision! two address==address lists! (transfer_node_ll32)\n");
			break;
		} else if (cache->casted_table32[pos].address == 0) { // empty
			cache->casted_table32[pos] = *old_node;
			break;
		} else {
			pos++;
			if (pos >= cache->cache_node_capacity) pos = 0;
		}
		collisions++;
	} while (collisions <= cache->collision_allowance);

	if (collisions > cache->collision_allowance) {
		fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node_ll32)\n", sizeof(void*)*8);
	}
}

static void
transfer_node_ll64(remote_index_cache *cache, remote_index_node64 *old_node)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_64(old_node->address, cache->cache_node_capacity);
	do {
		if (cache->casted_table64[pos].address == old_node->address) { // a hit like this shouldn't happen.
			fprintf(stderr, "impossible collision! two address==address lists! (transfer_node_ll64)\n");
			break;
		} else if (cache->casted_table64[pos].address == 0) { // empty
			cache->casted_table64[pos] = *old_node;
			break;
		} else {
			pos++;
			if (pos >= cache->cache_node_capacity) pos = 0;
		}
		collisions++;
	} while (collisions <= cache->collision_allowance);

	if (collisions > cache->collision_allowance) {
		fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node_ll64)\n", sizeof(void*)*8);
	}
}

static void
expand_cache(remote_index_cache *cache)
{
	// keep old stats
	size_t old_node_capacity = cache->cache_node_capacity;
	uint64_t old_node_count = cache->cache_node_count;
	uint64_t old_llnode_count = cache->cache_llnode_count;
	void *old_table = cache->table_memory;

	// double size
	cache->cache_size <<= 1;
	cache->cache_node_capacity <<= 1;
	cache->collision_allowance += STACK_LOGGING_REMOTE_CACHE_COLLISION_GROWTH_RATE;
	cache->table_memory = (void*)calloc(cache->cache_node_capacity, cache->in_use_node_size);
	if (cache->casted_table32) cache->casted_table32 = cache->table_memory;
	else cache->casted_table64 = cache->table_memory;

	// repopulate (expensive!)
	size_t i;
	if (cache->casted_table32) { // if target is 32-bit
		remote_index_node32 *casted_old_table = (remote_index_node32*)old_table;
		for (i = 0; i < old_node_capacity; i++) {
			if (casted_old_table[i].address) {
				transfer_node_ll32(cache, &casted_old_table[i]);
			}
		}
	} else {
		remote_index_node64 *casted_old_table = (remote_index_node64*)old_table;
		for (i = 0; i < old_node_capacity; i++) {
			if (casted_old_table[i].address) {
				transfer_node_ll64(cache, &casted_old_table[i]);
			}
		}
	}

	cache->cache_node_count = old_node_count;
	cache->cache_llnode_count = old_llnode_count;
	free(old_table);
//	printf("cache expanded to %0.2f mb (eff: %3.0f%%, capacity: %lu, nodes: %llu, llnodes: %llu)\n", ((float)(cache->cache_size))/(1 << 20), ((float)(cache->cache_node_count)*100.0)/((float)(cache->cache_node_capacity)), cache->cache_node_capacity, cache->cache_node_count, cache->cache_llnode_count);
}

static void
insert_node32(remote_index_cache *cache, uint32_t address, uint64_t index_file_offset)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_32(address, cache->cache_node_capacity);

	if (cache->next_block_index >= STACK_LOGGING_REMOTE_LINKS_PER_BLOCK) {
		cache->next_block_index = 0;
		cache->current_block++;
		cache->blocks[cache->current_block] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
/*		printf("node buffer added. total nodes: %ul (%u buffers, %0.2f mb)\n", STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*(cache->current_block+1),
			cache->current_block+1, ((float)(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node)*(cache->current_block+1)))/(1 << 20));
*/
	}
	index_ll_node *new_node = &cache->blocks[cache->current_block][cache->next_block_index++];
	new_node->index_file_offset = index_file_offset;
	new_node->next = NULL;

	bool inserted = false;
	while (!inserted) {
		if (cache->casted_table32[pos].address == address) { // hit!
			cache->casted_table32[pos].last_link->next = new_node; // insert at end
			cache->casted_table32[pos].last_link = new_node;
			inserted = true;
			break;
		} else if (cache->casted_table32[pos].address == 0) { // empty
			cache->casted_table32[pos].address = address;
			cache->casted_table32[pos].linked_list = new_node;
			cache->casted_table32[pos].last_link = new_node;
			cache->cache_node_count++;
			inserted = true;
			break;
		} else {
			pos++;
			if (pos >= cache->cache_node_capacity) pos = 0;
		}
		collisions++;
		if (collisions > cache->collision_allowance) {
			expand_cache(cache);
			pos = hash_index_32(address, cache->cache_node_capacity);
			collisions = 0;
		}
	}

	cache->cache_llnode_count++;
}

static void
insert_node64(remote_index_cache *cache, uint64_t address, uint64_t index_file_offset)
{
	uint32_t collisions = 0;
	size_t pos = hash_index_64(address, cache->cache_node_capacity);

	if (cache->next_block_index >= STACK_LOGGING_REMOTE_LINKS_PER_BLOCK) {
		cache->next_block_index = 0;
		cache->current_block++;
		cache->blocks[cache->current_block] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
	}
	index_ll_node *new_node = &cache->blocks[cache->current_block][cache->next_block_index++];
	new_node->index_file_offset = index_file_offset;
	new_node->next = NULL;

	bool inserted = false;
	while (!inserted) {
		if (cache->casted_table64[pos].address == address) { // hit!
			cache->casted_table64[pos].last_link->next = new_node; // insert at end
			cache->casted_table64[pos].last_link = new_node;
			inserted = true;
			break;
		} else if (cache->casted_table64[pos].address == 0) { // empty
			cache->casted_table64[pos].address = address;
			cache->casted_table64[pos].linked_list = new_node;
			cache->casted_table64[pos].last_link = new_node;
			inserted = true;
			break;
		} else {
			pos++;
			if (pos >= cache->cache_node_capacity) pos = 0;
		}
		collisions++;
		if (collisions > cache->collision_allowance) {
			expand_cache(cache);
			pos = hash_index_64(address, cache->cache_node_capacity);
			collisions = 0;
		}
	}
}

static void
update_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	remote_index_cache *cache = descriptors->cache;

	// create from scratch if necessary.
	if (!cache) {
		descriptors->cache = cache = (remote_index_cache*)calloc((size_t)1, sizeof(remote_index_cache));
		cache->cache_node_capacity = STACK_LOGGING_REMOTE_CACHE_DEFAULT_NODE_CAPACITY;
		cache->collision_allowance = STACK_LOGGING_REMOTE_CACHE_DEFAULT_COLLISION_ALLOWANCE;
		cache->cache_node_count = cache->cache_llnode_count = 0;
		cache->last_index_file_offset = 0;
		cache->next_block_index = 0;
		cache->current_block = 0;
		cache->blocks[0] = (index_ll_node*)malloc(STACK_LOGGING_REMOTE_LINKS_PER_BLOCK*sizeof(index_ll_node));
		cache->in_use_node_size = (descriptors->task_is_64_bit ? sizeof(remote_index_node64) : sizeof(remote_index_node32));
		cache->cache_size = cache->cache_node_capacity*cache->in_use_node_size;
		cache->table_memory = (void*)calloc(cache->cache_node_capacity, cache->in_use_node_size);
		if (descriptors->task_is_64_bit) cache->casted_table64 = (remote_index_node64*)(cache->table_memory);
		else cache->casted_table32 = (remote_index_node32*)(cache->table_memory);

		// now map in the shared memory, if possible
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_logging_directory_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, descriptors->remote_pid);

		int shmid = shm_open(shmem_name_string, O_RDWR, S_IRUSR | S_IWUSR);
		if (shmid >= 0) {
			cache->shmem = mmap(0, sizeof(stack_buffer_shared_memory), PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
			close(shmid);
			if (cache->shmem == MAP_FAILED) cache->shmem = NULL; // mmap() returns MAP_FAILED rather than NULL on error
		}

		if (shmid < 0 || cache->shmem == NULL) {
			// failed to connect to the shared memory region; warn and continue.
			_malloc_printf(ASL_LEVEL_INFO, "warning: unable to connect to remote process' shared memory; allocation histories may not be up-to-date.\n");
		}
	}

	// suspend and see how much updating there is to do. there are three scenarios, listed below
	bool update_snapshot = false;
	if (descriptors->remote_task != mach_task_self()) {
		task_suspend(descriptors->remote_task);
	}

	struct stat file_statistics;
	fstat(fileno(descriptors->index_file_stream), &file_statistics);
	size_t read_size = (descriptors->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	uint64_t read_this_update = 0;

	// computing the index delta is subtle; there are three cases:
	// 1. there is no shared memory (or we can't connect); diff the last_index_file_offset from the filesize.
	// 2. the only updates have been in shared memory; the disk file didn't change at all. delta_indecies should be zero, scan snapshot only.
	// 3. the updates have flushed to disk, meaning that most likely there is new data on disk that wasn't read from shared memory.
	//    correct delta_indecies for the pre-scanned amount and read the new data from disk and shmem.
	uint64_t delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
	uint32_t last_snapshot_scan_index = 0;
	if (delta_indecies && cache->shmem) {
		// case 3: add cache scanned to known from disk and recalc
		cache->last_index_file_offset += cache->snapshot.next_free_index_buffer_offset;
		delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
		update_snapshot = true;
	} else if (cache->shmem) {
		// case 2: set the last snapshot scan count so we don't rescan something we've seen.
		last_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
	}

	// no update necessary for the file; check if we need a snapshot.
	if (delta_indecies == 0) {
		if (cache->shmem && !update_snapshot) {
			update_snapshot = (cache->shmem->next_free_index_buffer_offset != cache->snapshot.next_free_index_buffer_offset);
		}
	}

	// if a snapshot is necessary, memcpy from the remote frozen process' memory
	// note: there were two ways to do this: spin lock or suspend. suspend allows us to
	// analyze processes even if they were artificially suspended. with a lock, there'd be
	// worry that the target was suspended with the lock taken.
	if (update_snapshot) {
		memcpy(&cache->snapshot, cache->shmem, sizeof(stack_buffer_shared_memory));
	}

	// resume
	if (descriptors->remote_task != mach_task_self()) {
		task_resume(descriptors->remote_task);
	}

	if (!update_snapshot && delta_indecies == 0) return; // absolutely no updating needed.

	FILE *the_index = (descriptors->index_file_stream);

	// prepare for the read; target process could be 32 or 64 bit.

	stack_logging_index_event32 *target_32_index = NULL;
	stack_logging_index_event64 *target_64_index = NULL;

	// perform the update from the file
	uint32_t i;
	if (delta_indecies) {
		char bufferSpace[4096]; // 4 kb
		target_32_index = (stack_logging_index_event32*)bufferSpace;
		target_64_index = (stack_logging_index_event64*)bufferSpace;
		size_t number_slots = (size_t)(4096/read_size);

		size_t read_count = 0;
		if (fseeko(the_index, (off_t)(cache->last_index_file_offset), SEEK_SET)) {
			fprintf(stderr, "error while attempting to cache information from remote stack index file. (update_cache_for_file_streams)\n");
		}
		off_t current_index_position = cache->last_index_file_offset;
		do {
			number_slots = MIN(delta_indecies - read_this_update, number_slots);
			read_count = fread(bufferSpace, read_size, number_slots, the_index);
			if (descriptors->task_is_64_bit) {
				for (i = 0; i < read_count; i++) {
					insert_node64(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			} else {
				for (i = 0; i < read_count; i++) {
					insert_node32(cache, STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			}
		} while (read_count);

		if (read_this_update < delta_indecies) {
			fprintf(stderr, "insufficient data in remote stack index file; expected more records.\n");
		}
		cache->last_index_file_offset += read_this_update * read_size;
	}

	if (update_snapshot) {
		target_32_index = (stack_logging_index_event32*)(cache->snapshot.index_buffer);
		target_64_index = (stack_logging_index_event64*)(cache->snapshot.index_buffer);

		uint32_t free_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
		off_t current_index_position = cache->snapshot.start_index_offset;
		if (descriptors->task_is_64_bit) {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node64(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		} else {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node32(cache, STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		}
	}
}

static void
destroy_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	uint32_t i;
	for (i = 0; i <= descriptors->cache->current_block; i++) {
		free(descriptors->cache->blocks[i]); // clears the linked list nodes.
	}
	if (descriptors->cache->shmem) {
		munmap(descriptors->cache->shmem, sizeof(stack_buffer_shared_memory));
	}
	free(descriptors->cache->table_memory);
	free(descriptors->cache);
	descriptors->cache = NULL;
}

#pragma mark - internal

// In the stack log analysis process, find the stack logging files for target process <pid>
// by scanning the temporary directory for directory entries with names of the form "stack-logs.<pid>."
// If we find such a directory then open the stack logging files in there.
static void
open_log_files(pid_t pid, remote_task_file_streams *this_task_streams)
{
	DIR *dp;
	struct dirent *entry;
	int prefix_length;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];

	reap_orphaned_log_files(false); // reap any left-over log files (for non-existent processes, but not for this analysis process)

	if ((dp = opendir(temporary_directory)) == NULL) {
		return;
	}

	// It's OK to use snprintf in this routine since it should only be called by the clients
	// of stack logging, and thus calls to malloc are OK.
	snprintf(prefix_name, PATH_MAX, "%s%d.", stack_logging_directory_base_name, pid); // make sure to use "%s%d." rather than just "%s%d" to match the whole pid
	prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) {
			snprintf(pathname, PATH_MAX, "%s/%s/%s", temporary_directory, entry->d_name, index_file_name);
			this_task_streams->index_file_stream = fopen(pathname, "r");

			snprintf(pathname, PATH_MAX, "%s/%s/%s", temporary_directory, entry->d_name, stack_file_name);
			this_task_streams->stack_file_stream = fopen(pathname, "r");

			break;
		}
	}
	closedir(dp);
}

static remote_task_file_streams*
retain_file_streams_for_task(task_t task)
{
	OSSpinLockLock(&remote_fd_list_lock);

	// see if they're already in use
	uint32_t i = 0;
	for (i = 0; i < remote_task_fd_count; i++) {
		if (remote_fds[i].remote_task == task) {
			remote_fds[i].in_use_count++;
			OSSpinLockUnlock(&remote_fd_list_lock);
			return &remote_fds[i];
		}
	}

	// open them
	uint32_t failures = 0;
	if (remote_task_fd_count == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
		while (remote_fds[next_remote_task_fd].in_use_count > 0) {
			next_remote_task_fd++;
			if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
			failures++;
			if (failures >= STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
				OSSpinLockUnlock(&remote_fd_list_lock);
				return NULL;
			}
		}
		fclose(remote_fds[next_remote_task_fd].index_file_stream);
		fclose(remote_fds[next_remote_task_fd].stack_file_stream);
		destroy_cache_for_file_streams(&remote_fds[next_remote_task_fd]);
	}

	pid_t pid;
	kern_return_t err = pid_for_task(task, &pid);
	if (err != KERN_SUCCESS) {
		OSSpinLockUnlock(&remote_fd_list_lock);
		return NULL;
	}

	remote_task_file_streams *this_task_streams = &remote_fds[next_remote_task_fd];

	open_log_files(pid, this_task_streams);

	// check if opens failed
	if (this_task_streams->index_file_stream == NULL || this_task_streams->stack_file_stream == NULL) {
		if (this_task_streams->index_file_stream) fclose(this_task_streams->index_file_stream);
		if (this_task_streams->stack_file_stream) fclose(this_task_streams->stack_file_stream);
		OSSpinLockUnlock(&remote_fd_list_lock);
		return NULL;
	}

	// check if target pid is running 64-bit
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
	struct kinfo_proc processInfo;
	size_t bufsize = sizeof(processInfo);
	if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, (size_t)0) == 0 && bufsize > 0) {
		this_task_streams->task_is_64_bit = processInfo.kp_proc.p_flag & P_LP64;
	} else {
		this_task_streams->task_is_64_bit = 0;
	}

	// otherwise set vars and go
	this_task_streams->in_use_count = 1;
	this_task_streams->remote_task = task;
	this_task_streams->remote_pid = pid;
	next_remote_task_fd++;
	if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
	remote_task_fd_count = MIN(remote_task_fd_count + 1, STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED);

	OSSpinLockUnlock(&remote_fd_list_lock);
	return this_task_streams;
}

static void
release_file_streams_for_task(task_t task)
{
	OSSpinLockLock(&remote_fd_list_lock);

	// decrement in-use count
	uint32_t i = 0;
	for (i = 0; i < remote_task_fd_count; i++) {
		if (remote_fds[i].remote_task == task) {
			remote_fds[i].in_use_count--;
			break;
		}
	}

	OSSpinLockUnlock(&remote_fd_list_lock);
}

#pragma mark - extern

kern_return_t
__mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}

	update_cache_for_file_streams(remote_fd);

	uint32_t collisions = 0;
	uint64_t located_file_position = 0;
	bool found = false;
	size_t hash = 0;
	if (remote_fd->task_is_64_bit) {
		hash = hash_index_64(address, remote_fd->cache->cache_node_capacity);
		do {
			if (remote_fd->cache->casted_table64[hash].address == address) { // hit!
				located_file_position = remote_fd->cache->casted_table64[hash].last_link->index_file_offset;
				found = true;
				break;
			} else if (remote_fd->cache->casted_table64[hash].address == 0) { // failure!
				break;
			}
			hash++;
			if (hash >= remote_fd->cache->cache_node_capacity) hash = 0;
			collisions++; // count probes so a full table can't spin this loop forever
		} while (collisions <= remote_fd->cache->collision_allowance);
	} else {
		hash = hash_index_32((uint32_t)address, remote_fd->cache->cache_node_capacity);
		do {
			if (remote_fd->cache->casted_table32[hash].address == (uint32_t)address) { // hit!
				located_file_position = remote_fd->cache->casted_table32[hash].last_link->index_file_offset;
				found = true;
				break;
			} else if (remote_fd->cache->casted_table32[hash].address == 0) { // failure!
				break;
			}
			hash++;
			if (hash >= remote_fd->cache->cache_node_capacity) hash = 0;
			collisions++; // count probes so a full table can't spin this loop forever
		} while (collisions <= remote_fd->cache->collision_allowance);
	}

	if (found) {
		// prepare for the read; target process could be 32 or 64 bit.
		stack_logging_index_event32 *target_32_index = NULL;
		stack_logging_index_event64 *target_64_index = NULL;

		if (located_file_position >= remote_fd->cache->last_index_file_offset) {
			// must be in shared memory
			if (remote_fd->cache->shmem) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}

		} else {
			// it's written to disk
			char bufferSpace[128];

			size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
			fseeko(remote_fd->index_file_stream, (off_t)located_file_position, SEEK_SET);
			size_t read_count = fread(bufferSpace, read_size, (size_t)1, remote_fd->index_file_stream);
			if (read_count) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}
		}
	}

	release_file_streams_for_task(task);

	if (!found) {
		return KERN_FAILURE;
	}

	return __mach_stack_logging_frames_for_uniqued_stack(task, located_file_position, stack_frames_buffer, max_stack_frames, count);
}
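
#ifdef TEST_DISK_STACK_LOGGING
/* Illustrative usage (hypothetical, not part of this file): fetching the most
 * recent allocation backtrace recorded for one block in a target task.
 */
static void
example_print_frames(task_t target_task, mach_vm_address_t block_address)
{
	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
	uint32_t count = 0, i;
	if (__mach_stack_logging_get_frames(target_task, block_address, frames,
			STACK_LOGGING_MAX_STACK_SIZE, &count) == KERN_SUCCESS) {
		for (i = 0; i < count; i++)
			fprintf(stderr, "frame %u: 0x%llx\n", i, (uint64_t)frames[i]);
	}
}
#endif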


kern_return_t
__mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}

	bool reading_all_addresses = (address == 0);
	mach_stack_logging_record_t pass_record;
	kern_return_t err = KERN_SUCCESS;

	if (reading_all_addresses) { // just stupidly read the index file from disk

		// update (read index file once and only once)
		update_cache_for_file_streams(remote_fd);

		FILE *the_index = (remote_fd->index_file_stream);

		// prepare for the read; target process could be 32 or 64 bit.
		char bufferSpace[2048]; // 2 kb
		stack_logging_index_event32 *target_32_index = (stack_logging_index_event32*)bufferSpace;
		stack_logging_index_event64 *target_64_index = (stack_logging_index_event64*)bufferSpace;
		uint32_t target_addr_32 = (uint32_t)STACK_LOGGING_DISGUISE((uint32_t)address);
		uint64_t target_addr_64 = STACK_LOGGING_DISGUISE((uint64_t)address);
		size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
		size_t number_slots = (size_t)(2048/read_size);
		uint64_t total_slots = remote_fd->cache->last_index_file_offset / read_size;

		// perform the search
		size_t read_count = 0;
		int64_t current_file_offset = 0;
		uint32_t i;
		do {
			// at this point, we need to read index events; read them from the file until it's necessary to grab them from the shared memory snapshot
			// and crop file reading to the point where we last scanned
			number_slots = (size_t)MIN(number_slots, total_slots);

			// if out of file to read (as of the time we entered this function), try to use shared memory snapshot
			if (number_slots == 0) {
				if (remote_fd->cache->shmem && remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset > (uint64_t)current_file_offset) {
					// use shared memory
					target_32_index = (stack_logging_index_event32*)remote_fd->cache->snapshot.index_buffer;
					target_64_index = (stack_logging_index_event64*)remote_fd->cache->snapshot.index_buffer;
					read_count = (uint32_t)(remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset - current_file_offset) / read_size;
					current_file_offset += read_count * read_size;
				} else {
					break;
				}
			} else {
				// get and save index (enumerator could modify)
				fseeko(the_index, current_file_offset, SEEK_SET);
				read_count = fread(bufferSpace, read_size, number_slots, the_index);
				current_file_offset = ftello(the_index);
				total_slots -= read_count;
			}

			if (remote_fd->task_is_64_bit) {
				for (i = 0; i < read_count; i++) {
					if (reading_all_addresses || target_64_index[i].address == target_addr_64) {
						pass_record.address = STACK_LOGGING_DISGUISE(target_64_index[i].address);
						pass_record.argument = target_64_index[i].argument;
						pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_64_index[i].offset_and_flags);
						pass_record.type_flags = STACK_LOGGING_FLAGS(target_64_index[i].offset_and_flags);
						enumerator(pass_record, context);
					}
				}
			} else {
				for (i = 0; i < read_count; i++) {
					if (reading_all_addresses || target_32_index[i].address == target_addr_32) {
						pass_record.address = STACK_LOGGING_DISGUISE(target_32_index[i].address);
						pass_record.argument = target_32_index[i].argument;
						pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_32_index[i].offset_and_flags);
						pass_record.type_flags = STACK_LOGGING_FLAGS(target_32_index[i].offset_and_flags);
						enumerator(pass_record, context);
					}
				}
			}
		} while (read_count);

	} else { // searching for a single address' history

		// update (read index file once and only once)
		update_cache_for_file_streams(remote_fd);

		// get linked-list of events
		uint32_t collisions = 0;
		uint64_t located_file_position = 0;
		size_t hash = 0;
		index_ll_node *index_position_linked_list = NULL;
		if (remote_fd->task_is_64_bit) {
			hash = hash_index_64(address, remote_fd->cache->cache_node_capacity);
			do {
				if (remote_fd->cache->casted_table64[hash].address == address) { // hit!
					index_position_linked_list = remote_fd->cache->casted_table64[hash].linked_list;
					break;
				} else if (remote_fd->cache->casted_table64[hash].address == 0) { // failure!
					break;
				}
				hash++;
				collisions++; // count this probe so the loop is bounded by the collision allowance
				if (hash >= remote_fd->cache->cache_node_capacity) hash = 0;
			} while (collisions <= remote_fd->cache->collision_allowance);
		} else {
			hash = hash_index_32((uint32_t)address, remote_fd->cache->cache_node_capacity);
			do {
				if (remote_fd->cache->casted_table32[hash].address == (uint32_t)address) { // hit!
					index_position_linked_list = remote_fd->cache->casted_table32[hash].linked_list;
					break;
				} else if (remote_fd->cache->casted_table32[hash].address == 0) { // failure!
					break;
				}
				hash++;
				collisions++; // count this probe so the loop is bounded by the collision allowance
				if (hash >= remote_fd->cache->cache_node_capacity) hash = 0;
			} while (collisions <= remote_fd->cache->collision_allowance);
		}

		// if we got something, run it
		char bufferSpace[128];
		size_t read_count = 0;
		stack_logging_index_event32 *target_32_index = (stack_logging_index_event32*)bufferSpace;
		stack_logging_index_event64 *target_64_index = (stack_logging_index_event64*)bufferSpace;
		size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
		while (index_position_linked_list) {
			located_file_position = index_position_linked_list->index_file_offset;

			if (located_file_position >= remote_fd->cache->snapshot.start_index_offset) {
				// the outer check already guarantees located_file_position >= start_index_offset
				if (remote_fd->cache->shmem && remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset > (uint64_t)located_file_position) {
					// use shared memory
					target_32_index = (stack_logging_index_event32*)(remote_fd->cache->snapshot.index_buffer + located_file_position - remote_fd->cache->snapshot.start_index_offset);
					target_64_index = (stack_logging_index_event64*)target_32_index;
					read_count = 1;
				} else {
					err = KERN_FAILURE;
					break;
				}
			} else {
				fseeko(remote_fd->index_file_stream, (off_t)located_file_position, SEEK_SET);
				read_count = fread(bufferSpace, read_size, (size_t)1, remote_fd->index_file_stream);
				if (!read_count) {
					err = KERN_FAILURE;
					break;
				}
			}
			if (remote_fd->task_is_64_bit) {
				pass_record.address = STACK_LOGGING_DISGUISE(target_64_index[0].address);
				pass_record.argument = target_64_index[0].argument;
				pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_64_index[0].offset_and_flags);
				pass_record.type_flags = STACK_LOGGING_FLAGS(target_64_index[0].offset_and_flags);
				enumerator(pass_record, context);
			} else {
				pass_record.address = STACK_LOGGING_DISGUISE(target_32_index[0].address);
				pass_record.argument = target_32_index[0].argument;
				pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_32_index[0].offset_and_flags);
				pass_record.type_flags = STACK_LOGGING_FLAGS(target_32_index[0].offset_and_flags);
				enumerator(pass_record, context);
			}
			index_position_linked_list = index_position_linked_list->next;
		}

	}

	release_file_streams_for_task(task);
	return err;
}
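
/*
 * Illustrative sketch (not part of the original file, kept out of the build
 * with #if 0): how a client might drive __mach_stack_logging_enumerate_records().
 * Passing address == 0 asks for every record in the target's index file; a
 * nonzero address restricts the enumeration to that block's history. The names
 * example_record_printer and example_enumerate are hypothetical, for exposition only.
 */
#if 0
static void
example_record_printer(mach_stack_logging_record_t record, void *context)
{
	uint64_t *seen = (uint64_t *)context;
	(*seen)++;
	fprintf(stderr, "type_flags: 0x%x address: 0x%llx argument: 0x%llx stack id: 0x%llx\n",
			(unsigned)record.type_flags, (unsigned long long)record.address,
			(unsigned long long)record.argument, (unsigned long long)record.stack_identifier);
}

static kern_return_t
example_enumerate(task_t task)
{
	uint64_t seen = 0;
	// address 0 enumerates everything, in index-file order (oldest first)
	kern_return_t err = __mach_stack_logging_enumerate_records(task, 0, example_record_printer, &seen);
	if (err == KERN_SUCCESS) fprintf(stderr, "saw %llu records\n", (unsigned long long)seen);
	return err;
}
#endif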


kern_return_t
__mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}

	// prepare for initial read
	FILE *stack_fd = (remote_fd->stack_file_stream);
	char bytes_buffer[16];
	stack_logging_backtrace_event *target_stack_event = (stack_logging_backtrace_event*)bytes_buffer;
	size_t read_size = sizeof(stack_logging_backtrace_event);
	size_t read_count = 0;
	off_t reading_offset = (off_t)stack_identifier;

	// get a temporary spot for the backtrace frames to go and reference the stack space such that the reference
	// can be later pointed at the shared memory snapshot and data read from there.
	uint64_t temp_frames_buffer[STACK_LOGGING_MAX_STACK_SIZE];
	uint64_t *big_frames = (uint64_t*)temp_frames_buffer;
	uint32_t *small_frames = (uint32_t*)temp_frames_buffer;
	size_t target_frame_size = (remote_fd->task_is_64_bit ? sizeof(uint64_t) : sizeof(uint32_t));
	char *snapshot_backtrace_location = NULL;

	int done = 0;
	int32_t total_frames = -1;
	int32_t hot_frames_read = 0;
	size_t new_hot_frames = 0;
	int32_t number_needed_hot_frames_in_event;
	size_t number_hot_frames_to_skip;
	int32_t i;
	bool skip_file_read;

	while (!done) {

		// not in cache; read the record: from disk if possible, from the shared memory snapshot if necessary.
		if (remote_fd->cache->shmem && reading_offset >= (off_t)(remote_fd->cache->snapshot.start_stack_offset)) {
			// must read from shared memory; the record isn't on disk yet
			snapshot_backtrace_location = (remote_fd->cache->snapshot.stack_buffer + (reading_offset - remote_fd->cache->snapshot.start_stack_offset));
			*target_stack_event = *(stack_logging_backtrace_event*)snapshot_backtrace_location;
			big_frames = (uint64_t*)(snapshot_backtrace_location + sizeof(stack_logging_backtrace_event));
			small_frames = (uint32_t*)big_frames;
			skip_file_read = true;
		} else {
			// the record's on disk
			i = fseeko(stack_fd, reading_offset, SEEK_SET);
			if (i != 0) break; // unable to seek to the target position
			read_count = fread(target_stack_event, read_size, (size_t)1, stack_fd);
			if (read_count == 0) break;

			big_frames = (uint64_t*)temp_frames_buffer;
			small_frames = (uint32_t*)temp_frames_buffer;
			skip_file_read = false;
		}

		if (total_frames < 0) {
			total_frames = target_stack_event->num_new_hot_frames + target_stack_event->num_identical_frames;
			if (total_frames > (int32_t)max_stack_frames) break; // don't know what to do with this; we'll just KERN_FAILURE.
		}

		// do the math to find how many frames to apply from previous event
		new_hot_frames = target_stack_event->num_new_hot_frames;
		number_needed_hot_frames_in_event = total_frames - hot_frames_read - target_stack_event->num_identical_frames;
		number_hot_frames_to_skip = new_hot_frames - number_needed_hot_frames_in_event;

		// read and apply the important frames of this one
		if (number_needed_hot_frames_in_event > 0) {
			if (!skip_file_read) {
				read_count = fread(temp_frames_buffer, target_frame_size, new_hot_frames, stack_fd);
				if (read_count < new_hot_frames) break;
			}

			if (remote_fd->task_is_64_bit) {
				for (i = 0; i < number_needed_hot_frames_in_event; i++) {
					stack_frames_buffer[hot_frames_read++] = big_frames[i+number_hot_frames_to_skip];
				}
			} else {
				for (i = 0; i < number_needed_hot_frames_in_event; i++) {
					stack_frames_buffer[hot_frames_read++] = small_frames[i+number_hot_frames_to_skip];
				}
			}
		}

		reading_offset += target_stack_event->offset_delta;

		if (hot_frames_read == total_frames) done = 1;
		else if (target_stack_event->offset_delta == 0) {
			fprintf(stderr, "incomplete stack record (identifier: 0x%qx)\n", reading_offset);
			break;
		}
	}
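
	/*
	 * Worked example of the loop above (illustrative, not part of the original
	 * file): suppose the target stack is A > B > C > D, hottest frame first.
	 * If the newest event on the chain stores hot frames [A, B] with
	 * num_identical_frames = 2, then total_frames = 4 and the first pass copies
	 * A and B. Following offset_delta to the previous event, which stores hot
	 * frames [X, C, D] with num_identical_frames = 0, the loop then needs
	 * 4 - 2 - 0 = 2 frames, so it skips 3 - 2 = 1 frame (X) and copies C and D,
	 * completing the backtrace.
	 */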

	release_file_streams_for_task(task);

	if (done) {
		*count = hot_frames_read;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
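
/*
 * Illustrative sketch (not part of the original file, kept out of the build
 * with #if 0): resolving a uniqued stack identifier (as delivered in
 * mach_stack_logging_record_t's stack_identifier field) back into frames.
 * The function name example_print_stack is hypothetical, for exposition only.
 */
#if 0
static void
example_print_stack(task_t task, uint64_t stack_identifier)
{
	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
	uint32_t frame_count = 0;
	uint32_t i;

	kern_return_t err = __mach_stack_logging_frames_for_uniqued_stack(task, stack_identifier,
			frames, STACK_LOGGING_MAX_STACK_SIZE, &frame_count);
	if (err != KERN_SUCCESS) {
		fprintf(stderr, "could not resolve stack id 0x%llx\n", (unsigned long long)stack_identifier);
		return;
	}
	// frames[0] is the hottest frame; the loop above reassembled the backtrace
	// from the delta-compressed events in the .stack file.
	for (i = 0; i < frame_count; i++) {
		fprintf(stderr, "  frame %u: 0x%llx\n", i, (unsigned long long)frames[i]);
	}
}
#endif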


#ifdef TEST_DISK_STACK_LOGGING

// cc -o stack_logging_disk stack_logging_disk.c -DTEST_DISK_STACK_LOGGING

#include <sys/wait.h>

int
main(void)
{
	int status;
	int i;

	fprintf(stderr, "master test process is %d\n", getpid());
	fprintf(stderr, "sizeof stack_buffer: %zu\n", sizeof(stack_buffer));
	fprintf(stderr, "sizeof thread_buffer: %zu\n", sizeof(thread_buffer));
	fprintf(stderr, "sizeof stack_logs_directory: %zu\n", sizeof(stack_logs_directory));
	fprintf(stderr, "sizeof remote_fds: %zu\n", sizeof(remote_fds));
	fprintf(stderr, "address of pre_write_backtrace_event_buffer: %p\n", &pre_write_backtrace_event_buffer);
	fprintf(stderr, "address of logging_use_compaction: %p\n", &logging_use_compaction);
	// fprintf(stderr, "size of all global data: %d\n", (logging_use_compaction) - (pre_write_backtrace_event_buffer) + sizeof(logging_use_compaction));

	create_log_files();

	// create a few child processes and exit them cleanly so their logs should get cleaned up
	fprintf(stderr, "\ncreating child processes and exiting cleanly\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_files();
			fprintf(stderr, "exiting child process %d\n", getpid());
			exit(1);
		}
		wait(&status);
	}

	// create a few child processes and abruptly _exit them, leaving their logs around
	fprintf(stderr, "\ncreating child processes and exiting abruptly, leaving logs around\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_files();
			fprintf(stderr, "exiting child process %d\n", getpid());
			_exit(1);
		}
		wait(&status);
	}

	// this should reap any remaining logs
	fprintf(stderr, "\nexiting master test process %d\n", getpid());
	delete_log_files();

	return 0;
}

#endif