/*
 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <pthread.h>
#include <paths.h>
#include <errno.h>
#include "stack_logging.h"
#include "malloc_printf.h"
#include "_simple.h" // as included by malloc.c, this defines ASL_LEVEL_INFO

#pragma mark -
#pragma mark Defines

#ifdef TEST_DISK_STACK_LOGGING
#define _malloc_printf fprintf
#undef ASL_LEVEL_INFO
#define ASL_LEVEL_INFO stderr
#endif

#define STACK_LOGGING_MAX_STACK_SIZE 512
#define STACK_LOGGING_BLOCK_WRITING_SIZE 8192
#define STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED 3

#define BACKTRACE_UNIQUING_DEBUG 0
// The expansion factor controls the shifting up of table size. A factor of 1 will double the size upon expanding,
// 2 will quadruple the size, etc. Maintaining a 66% fill in an ideal table requires the collision allowance to
// increase by 3 for every quadrupling of the table size (although this is the constant applied to insertion
// performance, O(c*n)).
#define EXPAND_FACTOR 2
#define COLLISION_GROWTH_RATE 3
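// For example (a worked sketch of the numbers above): with EXPAND_FACTOR 2, each expansion
// quadruples the table, so the default 256 pages (DEFAULT_UNIQUING_PAGE_SIZE below) grow
// 256 -> 1024 -> 4096, while max_collide steps 19 -> 22 -> 25 by COLLISION_GROWTH_RATE increments.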

// For a uniquing table, the useful node size is slots := floor(table_byte_size / (2 * sizeof(mach_vm_address_t)))
// Some useful numbers for the initial max collision value (desiring 66% fill):
// 16K-23K slots -> 16 collisions
// 24K-31K slots -> 17 collisions
// 32K-47K slots -> 18 collisions
// 48K-79K slots -> 19 collisions
// 80K-96K slots -> 20 collisions
#define INITIAL_MAX_COLLIDE 19
#define DEFAULT_UNIQUING_PAGE_SIZE 256
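// Sanity-checking those bands at the default size (a sketch assuming 4KB pages): 256 pages * 4096
// bytes = 1MB; slots = floor(1MB / (2 * 8 bytes)) = 65536 = 64K slots, which lands in the
// 48K-79K band and so matches INITIAL_MAX_COLLIDE of 19.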

#pragma mark -
#pragma mark Macros

#define STACK_LOGGING_FLAGS(longlongvar) (uint8_t)((uint64_t)(longlongvar) >> 56)
#define STACK_LOGGING_OFFSET(longlongvar) ((longlongvar) & 0x00FFFFFFFFFFFFFFull)
#define STACK_LOGGING_OFFSET_AND_FLAGS(longlongvar, realshortvar) (((uint64_t)(longlongvar) & 0x00FFFFFFFFFFFFFFull) | ((uint64_t)(realshortvar) << 56))
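// Example round-trip of the packing macros (illustrative values): the low 56 bits carry an
// offset into the index file or uniquing table, the top 8 bits carry the event type flags.
//	uint64_t packed = STACK_LOGGING_OFFSET_AND_FLAGS(0x123456ull, stack_logging_type_alloc);
//	uint64_t offset = STACK_LOGGING_OFFSET(packed);	// == 0x123456
//	uint8_t flags = STACK_LOGGING_FLAGS(packed);	// == stack_logging_type_alloc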

#pragma mark -
#pragma mark Types

typedef struct {
	uintptr_t argument;
	uintptr_t address;
	uint64_t offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event;

typedef struct {
	uint32_t argument;
	uint32_t address;
	uint64_t offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event32;

typedef struct {
	uint64_t argument;
	uint64_t address;
	uint64_t offset_and_flags; // top 8 bits are actually the flags!
} stack_logging_index_event64;

#pragma pack(push,4)
typedef struct {
	uint64_t numPages; // number of pages of the table
	uint64_t numNodes;
	uint64_t tableSize;
	uint64_t untouchableNodes;
	mach_vm_address_t table_address;
	int32_t max_collide;
	// 'table_address' is just an always 64-bit version of the pointer-sized 'table' field to remotely read;
	// it's important that the offset of 'table_address' in the struct does not change between 32 and 64-bit.
#if BACKTRACE_UNIQUING_DEBUG
	uint64_t nodesFull;
	uint64_t backtracesContained;
#endif
	mach_vm_address_t *table; // allocated using vm_allocate()
} backtrace_uniquing_table;
#pragma pack(pop)

// for storing/looking up allocations that haven't yet been written to disk; consistent size across 32/64-bit processes.
// It's important that these fields don't change alignment due to the architecture because they may be accessed from an
// analyzing process with a different arch - hence the pragmas.
#pragma pack(push,4)
typedef struct {
	uint64_t start_index_offset;
	uint32_t next_free_index_buffer_offset;
	mach_vm_address_t uniquing_table_address;
	char index_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
	backtrace_uniquing_table *uniquing_table;
} stack_buffer_shared_memory;
#pragma pack(pop)

// target process address -> record table (for __mach_stack_logging_get_frames)
typedef struct {
	uint64_t address;
	uint64_t index_file_offset;
} remote_index_node;

// for caching index information client-side:
typedef struct {
	size_t cache_size;
	size_t cache_node_capacity;
	uint32_t collision_allowance;
	remote_index_node *table_memory; // this can be malloced; it's on the client side.
	stack_buffer_shared_memory *shmem; // shared memory
	stack_buffer_shared_memory snapshot; // memory snapshot of the remote process' shared memory
	uint32_t last_pre_written_index_size;
	uint64_t last_index_file_offset;
	backtrace_uniquing_table uniquing_table; // snapshot of the remote process' uniquing table
} remote_index_cache;

// for reading stack history information from remote processes:
typedef struct {
	task_t remote_task;
	pid_t remote_pid;
	int32_t task_is_64_bit;
	int32_t in_use_count;
	FILE *index_file_stream;
	remote_index_cache *cache;
} remote_task_file_streams;

#pragma mark -
#pragma mark Constants/Globals

static OSSpinLock stack_logging_lock = OS_SPINLOCK_INIT;

// support for multi-threaded forks
extern void __stack_logging_fork_prepare();
extern void __stack_logging_fork_parent();
extern void __stack_logging_fork_child();

// support for gdb and others checking for stack_logging locks
__private_extern__ boolean_t __stack_logging_locked();

// single-thread access variables
static stack_buffer_shared_memory *pre_write_buffers;
static vm_address_t *stack_buffer;
static uintptr_t last_logged_malloc_address = 0;

// Constants to define stack logging file path names.
// Files will get written as /tmp/stack-logs.<pid>.<progname>.XXXXXX.index
// unless the base directory is specified otherwise with MallocStackLoggingDirectory.
// In this case, a file /tmp/stack-logs.<pid>.<progname>.XXXXXX.link will also be created.
static const char *stack_log_file_base_name = "stack-logs.";
static const char *stack_log_file_suffix = ".index";
static const char *stack_log_link_suffix = ".link";

static char stack_log_location[PATH_MAX];
static char stack_log_reference_file[PATH_MAX];
static char index_file_path[PATH_MAX];
static int index_file_descriptor = -1;

// for accessing remote log files
static remote_task_file_streams remote_fds[STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED];
static uint32_t next_remote_task_fd = 0;
static uint32_t remote_task_fd_count = 0;
static OSSpinLock remote_fd_list_lock = OS_SPINLOCK_INIT;

// activation variables
static int logging_use_compaction = 1; // set this to zero to always disable compaction.

// We set malloc_logger to NULL to disable logging if we encounter errors
// during file writing.
typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
extern malloc_logger_t *malloc_logger;

#pragma mark -
#pragma mark In-Memory Backtrace Uniquing

static __attribute__((always_inline))
inline void*
allocate_pages(uint64_t memSize)
{
	mach_vm_address_t allocatedMem = 0ull;
	if (mach_vm_allocate(mach_task_self(), &allocatedMem, memSize, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL)) != KERN_SUCCESS) {
218 malloc_printf("allocate_pages(): virtual memory exhaused!\n");
	}
	return (void*)(uintptr_t)allocatedMem;
}

static __attribute__((always_inline))
inline int
deallocate_pages(void* memPointer, uint64_t memSize)
{
	return mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)memPointer, memSize);
}

static backtrace_uniquing_table*
__create_uniquing_table(void)
{
	backtrace_uniquing_table *uniquing_table = (backtrace_uniquing_table*)allocate_pages((uint64_t)round_page(sizeof(backtrace_uniquing_table)));
	if (!uniquing_table) return NULL;
	bzero(uniquing_table, sizeof(backtrace_uniquing_table));
	uniquing_table->numPages = DEFAULT_UNIQUING_PAGE_SIZE;
	uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
	uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1; // make sure it's even.
	uniquing_table->table = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);
	uniquing_table->table_address = (uintptr_t)uniquing_table->table;
	uniquing_table->max_collide = INITIAL_MAX_COLLIDE;
	uniquing_table->untouchableNodes = 0;

#if BACKTRACE_UNIQUING_DEBUG
	malloc_printf("create_uniquing_table(): creating. size: %lldKB == %lldMB, numnodes: %lld (%lld untouchable)\n", uniquing_table->tableSize >> 10, uniquing_table->tableSize >> 20, uniquing_table->numNodes, uniquing_table->untouchableNodes);
	malloc_printf("create_uniquing_table(): table: %p; end: %p\n", uniquing_table->table, (void*)((uintptr_t)uniquing_table->table + (uintptr_t)uniquing_table->tableSize));
#endif
	return uniquing_table;
}

static void
__expand_uniquing_table(backtrace_uniquing_table *uniquing_table)
{
	mach_vm_address_t *oldTable = uniquing_table->table;
	uint64_t oldsize = uniquing_table->tableSize;
	uint64_t oldnumnodes = uniquing_table->numNodes;

	uniquing_table->numPages = uniquing_table->numPages << EXPAND_FACTOR;
	uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
	uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1; // make sure it's even.
	mach_vm_address_t *newTable = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);

	uniquing_table->table = newTable;
	uniquing_table->table_address = (uintptr_t)uniquing_table->table;
	uniquing_table->max_collide = uniquing_table->max_collide + COLLISION_GROWTH_RATE;

	if (mach_vm_copy(mach_task_self(), (mach_vm_address_t)(uintptr_t)oldTable, oldsize, (mach_vm_address_t)(uintptr_t)newTable) != KERN_SUCCESS) {
		malloc_printf("expandUniquingTable(): VMCopyFailed\n");
	}
	uniquing_table->untouchableNodes = oldnumnodes;

#if BACKTRACE_UNIQUING_DEBUG
	malloc_printf("expandUniquingTable(): expanded from nodes full: %lld of: %lld (~%2d%%); to nodes: %lld (inactive = %lld); unique bts: %lld\n",
	              uniquing_table->nodesFull, oldnumnodes, (int)(((uniquing_table->nodesFull * 100.0) / (double)oldnumnodes) + 0.5),
	              uniquing_table->numNodes, uniquing_table->untouchableNodes, uniquing_table->backtracesContained);
	malloc_printf("expandUniquingTable(): allocate: %p; end: %p\n", newTable, (void*)((uintptr_t)newTable + (uintptr_t)(uniquing_table->tableSize)));
	malloc_printf("expandUniquingTable(): deallocate: %p; end: %p\n", oldTable, (void*)((uintptr_t)oldTable + (uintptr_t)oldsize));
#endif

	if (deallocate_pages(oldTable, oldsize) != KERN_SUCCESS) {
		malloc_printf("expandUniquingTable(): mach_vm_deallocate failed. [%p]\n", uniquing_table->table);
	}
}

static int
__enter_frames_in_table(backtrace_uniquing_table *uniquing_table, uint64_t *foundIndex, mach_vm_address_t *frames, int32_t count)
{
	// The hash values need to be the same size as the addresses (because we use the value -1); for clarity, define a new type.
	typedef mach_vm_address_t hash_index_t;

	mach_vm_address_t thisPC;
	hash_index_t hash, uParent = (hash_index_t)(-1ll), modulus = (uniquing_table->numNodes-uniquing_table->untouchableNodes-1);
	int32_t collisions, lcopy = count, returnVal = 1;
	hash_index_t hash_multiplier = ((uniquing_table->numNodes - uniquing_table->untouchableNodes)/(uniquing_table->max_collide*2+1));
	mach_vm_address_t *node;
	while (--lcopy >= 0) {
		thisPC = frames[lcopy];

		// hash = initialHash(uniquing_table, uParent, thisPC);
		hash = uniquing_table->untouchableNodes + (((uParent << 4) ^ (thisPC >> 2)) % modulus);
		collisions = uniquing_table->max_collide;

		while (collisions--) {
			node = uniquing_table->table + (hash * 2);

			if (*node == 0 && node[1] == 0) {
				// blank; store this entry!
				// Note that we need to test for both node[0] and node[1] as (0, -1) is a valid entry
				node[0] = thisPC;
				node[1] = uParent;
				uParent = hash;
#if BACKTRACE_UNIQUING_DEBUG
				uniquing_table->nodesFull++;
				if (lcopy == 0) {
					uniquing_table->backtracesContained++;
				}
#endif
				break;
			}
			if (*node == thisPC && node[1] == uParent) {
				// hit! retrieve index and go.
				uParent = hash;
				break;
			}

			hash += collisions * hash_multiplier + 1;

			if (hash >= uniquing_table->numNodes) {
				hash -= (uniquing_table->numNodes - uniquing_table->untouchableNodes); // wrap around.
			}
		}

		if (collisions < 0) {
			returnVal = 0;
			break;
		}
	}

	if (returnVal) *foundIndex = uParent;

	return returnVal;
}

static void
__unwind_stack_from_table_index(backtrace_uniquing_table *uniquing_table, uint64_t index_pos, mach_vm_address_t *out_frames_buffer, uint32_t *out_frames_count, uint32_t max_frames)
{
	mach_vm_address_t *node = uniquing_table->table + (index_pos * 2);
	uint32_t foundFrames = 0;
	if (index_pos < uniquing_table->numNodes) {
		while (foundFrames < max_frames) {
			out_frames_buffer[foundFrames++] = node[0];
			if (node[1] == (mach_vm_address_t)(-1ll)) break;
			node = uniquing_table->table + (node[1] * 2);
		}
	}

	*out_frames_count = foundFrames;
}
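
#if 0
// Illustrative sketch (not compiled into the build; the frame values are placeholders):
// entering the same backtrace twice yields the same uniqued index, and unwinding that
// index reproduces the frames. This mirrors how __disk_stack_logging_log_stack() below
// retries __enter_frames_in_table() after expanding the table.
static void
__uniquing_example(void)
{
	backtrace_uniquing_table *table = __create_uniquing_table();
	mach_vm_address_t frames[3] = { 0x1000, 0x2000, 0x3000 }; // hot-to-cold PCs
	uint64_t idx1 = 0, idx2 = 0;
	while (!__enter_frames_in_table(table, &idx1, frames, 3)) __expand_uniquing_table(table);
	while (!__enter_frames_in_table(table, &idx2, frames, 3)) __expand_uniquing_table(table);
	// idx1 == idx2: identical stacks unique to the same node.
	mach_vm_address_t out[3];
	uint32_t count = 0;
	__unwind_stack_from_table_index(table, idx1, out, &count, 3);
	// out[] is again { 0x1000, 0x2000, 0x3000 } and count == 3.
}
#endif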

#pragma mark -
#pragma mark Disk Stack Logging

static void delete_log_files(void); // pre-declare
static int delete_logging_file(char *log_location);

static void
append_int(char * filename, pid_t pid, size_t maxLength)
{
	size_t len = strlen(filename);

	uint32_t count = 0;
	pid_t value = pid;
	while (value > 0) {
		value /= 10;
		count++;
	}

	if (len + count >= maxLength) return; // don't modify the string if it would violate maxLength

	filename[len + count] = '\0';

	value = pid;
	uint32_t i;
	for (i = 0 ; i < count ; i ++) {
		filename[len + count - 1 - i] = '0' + value % 10;
		value /= 10;
	}
}
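
// For example (illustrative): given a buffer holding "stack-logs.", append_int(buffer, 1234, PATH_MAX)
// leaves "stack-logs.1234". The decimal digits are written in place, and nothing is written at all
// if the result would not fit within maxLength.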

// If successful, returns path to log file that was created. Otherwise returns NULL.
static char *
create_log_file(void)
{
	pid_t pid = getpid();
	const char *progname = getprogname();
	char *created_log_location = NULL;

	// WARNING! use of snprintf can induce malloc() calls
	bool use_alternate_location = false;
	char *evn_log_directory = getenv("MallocStackLoggingDirectory");
	if (evn_log_directory && *evn_log_directory) {
		use_alternate_location = true;
		strlcpy(stack_log_location, evn_log_directory, (size_t)PATH_MAX);
		size_t evn_log_len = strlen(stack_log_location);
		// add the '/' only if it's not already there.
		if (evn_log_directory[evn_log_len-1] != '/') {
			strlcat(stack_log_location, "/", (size_t)PATH_MAX);
		}
	} else {
		strlcpy(stack_log_location, _PATH_TMP, (size_t)PATH_MAX);
	}

	strlcat(stack_log_location, stack_log_file_base_name, (size_t)PATH_MAX);
	append_int(stack_log_location, pid, (size_t)PATH_MAX);
	if (progname && progname[0] != '\0') {
		strlcat(stack_log_location, ".", (size_t)PATH_MAX);
		strlcat(stack_log_location, progname, (size_t)PATH_MAX);
	}
	if (!use_alternate_location) strlcat(stack_log_location, ".XXXXXX", (size_t)PATH_MAX);
	strlcat(stack_log_location, stack_log_file_suffix, (size_t)PATH_MAX);

	// in the case where the user has specified an alternate location, drop a reference file
	// in /tmp with the suffix 'stack_log_link_suffix' (".link") and save the path of the
	// stack logging file there.
	if (use_alternate_location) {
		strlcpy(stack_log_reference_file, _PATH_TMP, (size_t)PATH_MAX);
		strlcat(stack_log_reference_file, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(stack_log_reference_file, pid, (size_t)PATH_MAX);
		if (progname && progname[0] != '\0') {
			strlcat(stack_log_reference_file, ".", (size_t)PATH_MAX);
			strlcat(stack_log_reference_file, progname, (size_t)PATH_MAX);
		}
		strlcat(stack_log_reference_file, ".XXXXXX", (size_t)PATH_MAX);
		strlcat(stack_log_reference_file, stack_log_link_suffix, (size_t)PATH_MAX);

		int link_file_descriptor = mkstemps(stack_log_reference_file, (int)strlen(stack_log_link_suffix));
		if (link_file_descriptor == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "unable to create stack reference file at %s\n", stack_log_location);
			return NULL;
		}
		ssize_t written = write(link_file_descriptor, stack_log_location, strlen(stack_log_location));
		if (written < (ssize_t)strlen(stack_log_location)) {
			_malloc_printf(ASL_LEVEL_INFO, "unable to write to stack reference file at %s\n", stack_log_location);
			return NULL;
		}
		const char *description_string = "\n(This is a reference file to the stack logs at the path above.)\n";
		write(link_file_descriptor, description_string, strlen(description_string));
		close(link_file_descriptor);
	}

	// Securely create the log file.
	if ((index_file_descriptor = mkstemps(stack_log_location, (int)strlen(stack_log_file_suffix))) != -1) {
		_malloc_printf(ASL_LEVEL_INFO, "stack logs being written into %s\n", stack_log_location);
		created_log_location = stack_log_location;
	} else {
		_malloc_printf(ASL_LEVEL_INFO, "unable to create stack logs at %s\n", stack_log_location);
		if (use_alternate_location) delete_logging_file(stack_log_reference_file);
		stack_log_reference_file[0] = '\0';
		stack_log_location[0] = '\0';
		created_log_location = NULL;
	}
	return created_log_location;
}
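
// Illustrative outcome (hypothetical paths): if MallocStackLoggingDirectory=/var/log/mylogs and
// pid 1234 of "MyApp" starts logging, this leaves something like
//	/var/log/mylogs/stack-logs.1234.MyApp.index	<- the index file itself
//	/tmp/stack-logs.1234.MyApp.aB3xQ9.link	<- reference file whose first line is the path above
// Without the environment variable set, only /tmp/stack-logs.1234.MyApp.aB3xQ9.index is created.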

// Check to see if the log file is actually a reference to another location
static int
log_file_is_reference(char *log_location, char *out_reference_loc_buffer, size_t max_reference_path_size)
{
	if (log_location == NULL || log_location[0] == '\0') return 0;

	size_t log_len = strlen(log_location);
	size_t link_suffix_len = strlen(stack_log_link_suffix);
	if (log_len < link_suffix_len || strncmp(log_location+log_len-link_suffix_len, stack_log_link_suffix, link_suffix_len) != 0) {
		// not a reference file.
		return 0;
	}

	if (!out_reference_loc_buffer || max_reference_path_size == 0) return 1;

	FILE *reference_file = fopen(log_location, "r");
	if (reference_file == NULL) {
		// if unable to open the file, it may be because another user created it; no need to warn.
		out_reference_loc_buffer[0] = '\0';
		return 1;
	}

	char *ret = fgets(out_reference_loc_buffer, (int)max_reference_path_size, reference_file);
	if (!ret) {
		out_reference_loc_buffer[0] = '\0';
		_malloc_printf(ASL_LEVEL_INFO, "unable to read from stack logging reference file at %s\n", log_location);
		fclose(reference_file); // don't leak the stream on this early return
		return 1;
	} else {
		size_t read_line_len = strlen(out_reference_loc_buffer);
		if (read_line_len >= 1 && out_reference_loc_buffer[read_line_len-1] == '\n') {
			out_reference_loc_buffer[read_line_len-1] = '\0';
		}
	}

	fclose(reference_file);

	return 1;
}

// This function may be called from the target process when exiting, or from either the target
// process or a stack log analysis process when reaping orphaned stack log files.
// Returns -1 if the files exist and they couldn't be removed, returns 0 otherwise.
static int
delete_logging_file(char *log_location)
{
	if (log_location == NULL || log_location[0] == '\0') return 0;

	struct stat statbuf;
	if (unlink(log_location) != 0 && stat(log_location, &statbuf) == 0) {
		return -1;
	}
	return 0;
}

// This function will be called from atexit() in the target process.
static void
delete_log_files(void)
{
	if (stack_log_location && stack_log_location[0]) {
		if (delete_logging_file(stack_log_location) == 0) {
			_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", stack_log_location);
			index_file_path[0] = '\0';
		} else {
			_malloc_printf(ASL_LEVEL_INFO, "unable to delete stack logs from %s\n", stack_log_location);
		}
	}
	if (stack_log_reference_file && stack_log_reference_file[0]) {
		delete_logging_file(stack_log_reference_file);
	}
}

static bool
is_process_running(pid_t pid)
{
	struct kinfo_proc kpt[1];
	size_t size = sizeof(struct kinfo_proc);
	int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};

	sysctl(mib, 4, kpt, &size, NULL, (size_t)0); // size is either 1 or 0 entries when we ask for a single pid

	return (size==sizeof(struct kinfo_proc));
}

// The log files can be quite large and aren't too useful after the process that created them no longer exists.
// Normally they should get removed when the process exits, but if the process crashed the log files might remain.
// So, reap any stack log files for processes that no longer exist.
//
// If the remove_for_this_pid flag is set, then any log files that already exist for the current process will also be deleted.
// Those log files are probably the result of this process having been exec'ed from another one (without a fork()).
// The remove_for_this_pid flag is only set for a target process (one just starting logging); a stack logging "client"
// process reaps log files too, but if we're using stack logging on the client process itself, then we don't want to remove
// its own log files.
static void
reap_orphaned_log_files(bool remove_for_this_pid)
{
	DIR *dp;
	struct dirent *entry;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];
	pid_t current_pid = getpid();

	if ((dp = opendir(_PATH_TMP)) == NULL) {
		return;
	}

	strlcpy(prefix_name, stack_log_file_base_name, (size_t)PATH_MAX);
	size_t prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( entry->d_type != DT_DIR && entry->d_type != DT_LNK && ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) ) {
			long pid = strtol(&entry->d_name[prefix_length], (char **)NULL, 10);
			if ( (! is_process_running((pid_t)pid)) || (remove_for_this_pid && (pid_t)pid == current_pid) ) {
				strlcpy(pathname, _PATH_TMP, (size_t)PATH_MAX);
				strlcat(pathname, entry->d_name, (size_t)PATH_MAX);
				char reference_file_buffer[PATH_MAX];
				bool pathname_is_ref_file = false;
				if (log_file_is_reference(pathname, reference_file_buffer, (size_t)PATH_MAX) && *reference_file_buffer) {
					pathname_is_ref_file = true;
					if (delete_logging_file(reference_file_buffer) == 0) {
						if (remove_for_this_pid && pid == current_pid) {
							_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", reference_file_buffer);
						} else {
							_malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, reference_file_buffer);
						}
					}
				}
				if (delete_logging_file(pathname) == 0) {
					if (remove_for_this_pid && pid == current_pid) {
						if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", pathname);
					} else {
						if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, pathname);
					}
					char shmem_name_string[PATH_MAX];
					strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
					append_int(shmem_name_string, (pid_t)pid, (size_t)PATH_MAX);
					if (pid != current_pid) shm_unlink(shmem_name_string);
				}
			}
		}
	}
	closedir(dp);
}

/*
 * Since there are many errors that could cause stack logging to get disabled, this is a convenience method
 * for disabling any future logging in this process and for informing the user.
 */
static void
disable_stack_logging(void)
{
	_malloc_printf(ASL_LEVEL_INFO, "stack logging disabled due to previous errors.\n");
	stack_logging_enable_logging = 0;
	malloc_logger = NULL;
}

/* A wrapper around write() that will try to reopen the index/stack file and
 * write to it if someone closed it underneath us (e.g. the process we just
 * started decided to close all file descriptors except stdin/stdout/stderr). Some
 * programs like to do that, and calling abort() on them is rude.
 */
static ssize_t
robust_write(int fd, const void *buf, size_t nbyte) {
	extern int errno;
	ssize_t written = write(fd, buf, nbyte);
	if (written == -1 && errno == EBADF) {
		char *file_to_reopen = NULL;
		int *fd_to_reset = NULL;

		// descriptor was closed on us. We need to reopen it
		if (fd == index_file_descriptor) {
			file_to_reopen = index_file_path;
			fd_to_reset = &index_file_descriptor;
		} else {
			// We don't know about this file. Return (and abort()).
			_malloc_printf(ASL_LEVEL_INFO, "Unknown file descriptor; expecting stack logging index file\n");
			return -1;
		}

		// The file *should* already exist. If not, fail.
		fd = open(file_to_reopen, O_WRONLY | O_APPEND);
		if (fd < 3) {
			// If we somehow got stdin/out/err, we need to relinquish them and
			// get another fd.
			int fds_to_close[3] = { 0 };
			while (fd < 3) {
				if (fd == -1) {
					_malloc_printf(ASL_LEVEL_INFO, "unable to re-open stack logging file %s\n", file_to_reopen);
					delete_log_files();
					return -1;
				}
				fds_to_close[fd] = 1;
				fd = dup(fd);
			}

			// We have an fd we like. Close the ones we opened.
			if (fds_to_close[0]) close(0);
			if (fds_to_close[1]) close(1);
			if (fds_to_close[2]) close(2);
		}

		*fd_to_reset = fd;
		written = write(fd, buf, nbyte);
	}
	return written;
}

static void
flush_data(void)
{
	ssize_t written; // signed size_t
	size_t remaining;
	char * p;

	if (index_file_descriptor == -1) {
		if (create_log_file() == NULL) {
			return;
		}
	}

	// Write the events before the index so that hopefully the events will be on disk if the index refers to them.
	p = pre_write_buffers->index_buffer;
	remaining = (size_t)pre_write_buffers->next_free_index_buffer_offset;
	while (remaining > 0) {
		written = robust_write(index_file_descriptor, p, remaining);
		if (written == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n", index_file_path, strerror(errno));
			disable_stack_logging();
			return;
		}
		p += written;
		remaining -= written;
	}

	pre_write_buffers->start_index_offset += pre_write_buffers->next_free_index_buffer_offset;
	pre_write_buffers->next_free_index_buffer_offset = 0;
}

static void
prepare_to_log_stacks(void)
{
	if (!pre_write_buffers) {
		last_logged_malloc_address = 0ul;
		logging_use_compaction = (stack_logging_dontcompact ? 0 : logging_use_compaction);

		// Create a shared memory region to hold the pre-write index and stack buffers. This will allow remote analysis processes to access
		// these buffers to get logs for even the most recent allocations. The remote process will need to pause this process to ensure that
		// the contents of these buffers don't change while being inspected.
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, getpid(), (size_t)PATH_MAX);

		int shmid = shm_open(shmem_name_string, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
		if (shmid < 0) {
			// Failed to create shared memory region; turn off stack logging.
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		size_t full_shared_mem_size = sizeof(stack_buffer_shared_memory);
		ftruncate(shmid, (off_t)full_shared_mem_size);
		pre_write_buffers = (stack_buffer_shared_memory*)mmap(0, full_shared_mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
		close(shmid);

		if (!pre_write_buffers) {
			_malloc_printf(ASL_LEVEL_INFO, "error mapping in shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		// Store and use the buffer offsets in shared memory so that they can be accessed remotely
		pre_write_buffers->start_index_offset = 0ull;
		pre_write_buffers->next_free_index_buffer_offset = 0;

		// create the backtrace uniquing table
		pre_write_buffers->uniquing_table = __create_uniquing_table();
		pre_write_buffers->uniquing_table_address = (mach_vm_address_t)(uintptr_t)pre_write_buffers->uniquing_table;
		if (!pre_write_buffers->uniquing_table) {
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating stack uniquing table\n");
			disable_stack_logging();
			return;
		}

		stack_buffer = (vm_address_t*)allocate_pages((uint64_t)round_page(sizeof(vm_address_t) * STACK_LOGGING_MAX_STACK_SIZE));
		if (!stack_buffer) {
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating stack trace buffer\n");
			disable_stack_logging();
			return;
		}

		// malloc() can be called by the following, so these need to be done outside the stack_logging_lock but after the buffers have been set up.
		atexit(delete_log_files); // atexit() can call malloc()
		reap_orphaned_log_files(true); // this calls opendir() which calls malloc()

		// this call ensures that the log files exist; analyzing processes will rely on this assumption.
		if (create_log_file() == NULL) {
			disable_stack_logging();
			return;
		}
	}
}

void
__disk_stack_logging_log_stack(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
{
	if (!stack_logging_enable_logging) return;

	// check incoming data
	if (type_flags & stack_logging_type_alloc && type_flags & stack_logging_type_dealloc) {
		uintptr_t swapper = size;
		size = ptr_arg;
		ptr_arg = swapper;
		if (ptr_arg == return_val) return; // realloc had no effect, skipping

		if (ptr_arg == 0) { // realloc(NULL, size) same as malloc(size)
			type_flags ^= stack_logging_type_dealloc;
		} else {
			// realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
			__disk_stack_logging_log_stack(stack_logging_type_dealloc, zone_ptr, ptr_arg, (uintptr_t)0, (uintptr_t)0, num_hot_to_skip + 1);
			__disk_stack_logging_log_stack(stack_logging_type_alloc, zone_ptr, size, (uintptr_t)0, return_val, num_hot_to_skip + 1);
			return;
		}
	}
	if (type_flags & stack_logging_type_dealloc) {
		if (size) {
			ptr_arg = size;
			size = 0;
		} else return; // free(nil)
	}
	if (type_flags & stack_logging_type_alloc && return_val == 0) return; // alloc that failed

	type_flags &= 0x7;

	// now actually begin
	prepare_to_log_stacks();

	// since there could have been a fatal (to stack logging) error such as the log files not being created, check this variable before continuing
	if (!stack_logging_enable_logging) return;
	vm_address_t self_thread = (vm_address_t)pthread_self(); // use pthread_self() rather than mach_thread_self() to avoid system call

	// lock and enter
	OSSpinLockLock(&stack_logging_lock);

	if (!stack_logging_enable_logging) {
		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// compaction
	if (last_logged_malloc_address && (type_flags & stack_logging_type_dealloc) && STACK_LOGGING_DISGUISE(ptr_arg) == last_logged_malloc_address) {
		// *waves hand* the last allocation never occurred
		pre_write_buffers->next_free_index_buffer_offset -= (uint32_t)sizeof(stack_logging_index_event);
		last_logged_malloc_address = 0ul;

		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// gather stack
	uint32_t count;
	thread_stack_pcs(stack_buffer, STACK_LOGGING_MAX_STACK_SIZE-1, &count); // only gather up to STACK_LOGGING_MAX_STACK_SIZE-1 since we append thread id
	stack_buffer[count++] = self_thread + 1; // stuffing thread # in the coldest slot. Add 1 to match what the old stack logging did.
	num_hot_to_skip += 2;
	if (count <= num_hot_to_skip) {
		// Oops! Didn't get a valid backtrace from thread_stack_pcs().
		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// unique stack in memory
	count -= num_hot_to_skip;
#if __LP64__
	mach_vm_address_t *frames = (mach_vm_address_t*)stack_buffer + num_hot_to_skip;
#else
	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
	uint32_t i;
	for (i = 0; i < count; i++) {
		frames[i] = stack_buffer[i+num_hot_to_skip];
	}
#endif

	uint64_t uniqueStackIdentifier = (uint64_t)(-1ll);
	while (!__enter_frames_in_table(pre_write_buffers->uniquing_table, &uniqueStackIdentifier, frames, (int32_t)count)) {
		__expand_uniquing_table(pre_write_buffers->uniquing_table);
	}

	stack_logging_index_event current_index;
	if (type_flags & stack_logging_type_alloc) {
		current_index.address = STACK_LOGGING_DISGUISE(return_val);
		current_index.argument = size;
		if (logging_use_compaction) {
			last_logged_malloc_address = current_index.address; // disguised
		}
	} else {
		current_index.address = STACK_LOGGING_DISGUISE(ptr_arg);
		current_index.argument = 0ul;
		last_logged_malloc_address = 0ul;
	}
	current_index.offset_and_flags = STACK_LOGGING_OFFSET_AND_FLAGS(uniqueStackIdentifier, type_flags);

	// the following line is a good debugging tool for logging each allocation event as it happens.
	// malloc_printf("{0x%lx, %lld}\n", STACK_LOGGING_DISGUISE(current_index.address), uniqueStackIdentifier);

	// flush the data buffer to disk if necessary
	if (pre_write_buffers->next_free_index_buffer_offset + sizeof(stack_logging_index_event) >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
		flush_data();
	}

	// store bytes in buffers
	memcpy(pre_write_buffers->index_buffer+pre_write_buffers->next_free_index_buffer_offset, &current_index, sizeof(stack_logging_index_event));
	pre_write_buffers->next_free_index_buffer_offset += (uint32_t)sizeof(stack_logging_index_event);

	OSSpinLockUnlock(&stack_logging_lock);
}

void
__stack_logging_fork_prepare() {
	OSSpinLockLock(&stack_logging_lock);
}

void
__stack_logging_fork_parent() {
	OSSpinLockUnlock(&stack_logging_lock);
}

void
__stack_logging_fork_child() {
	malloc_logger = NULL;
	stack_logging_enable_logging = 0;
	OSSpinLockUnlock(&stack_logging_lock);
}

boolean_t
__stack_logging_locked()
{
	bool acquired_lock = OSSpinLockTry(&stack_logging_lock);
	if (acquired_lock) OSSpinLockUnlock(&stack_logging_lock);
	return (acquired_lock ? false : true);
}

#pragma mark -
#pragma mark Remote Stack Log Access

#pragma mark - Design notes:

/*

this first one will look through the index, find the "stack_identifier" (i.e. the offset in the log file), and call the third function listed here.
extern kern_return_t __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *num_frames);
	// Gets the last allocation record about address

if !address, will load index and iterate through (expensive)
else will load just index, search for stack, and then use third function here to retrieve. (also expensive)
extern kern_return_t __mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context);
	// Applies enumerator to all records involving address sending context as enumerator's second parameter; if !address, applies enumerator to all records

this function will load the stack file, look for the stack, and follow up to STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY references to reconstruct.
extern kern_return_t __mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count);
	// Given a uniqued_stack fills stack_frames_buffer

*/
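
#if 0
// Illustrative client-side sketch (hypothetical 'target_task' and 'addr'): fetching the
// backtrace recorded for the last allocation event at 'addr' in a target task.
mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
uint32_t frame_count = 0;
if (__mach_stack_logging_get_frames(target_task, addr, frames, STACK_LOGGING_MAX_STACK_SIZE, &frame_count) == KERN_SUCCESS) {
	// frames[0..frame_count-1] now holds the recorded PCs, hottest frame first.
}
#endif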

#pragma mark - caching

__attribute__((always_inline)) static inline size_t
hash_index(uint64_t address, size_t max_pos) {
	return (size_t)((address >> 2) % (max_pos-1)); // simplicity rules.
}

__attribute__((always_inline)) static inline size_t
hash_multiplier(size_t capacity, uint32_t allowed_collisions) {
	return (capacity/(allowed_collisions*2+1));
}

__attribute__((always_inline)) static inline size_t
next_hash(size_t hash, size_t multiplier, size_t capacity, uint32_t collisions) {
	hash += multiplier * collisions;
	if (hash >= capacity) hash -= capacity;
	return hash;
}
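
// Probe-sequence sketch for these helpers: with the initial client cache (capacity 1 << 14 nodes
// and collision allowance 17, set up in update_cache_for_file_streams() below), multiplier =
// 16384 / (17*2 + 1) = 468, so successive probes for one lookup land at pos, pos + 468,
// pos + 1404, pos + 2808, ... (mod capacity), spreading colliding entries across the table.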

static void
transfer_node(remote_index_cache *cache, remote_index_node *old_node)
{
	uint32_t collisions = 0;
	size_t pos = hash_index(old_node->address, cache->cache_node_capacity);
	size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
	do {
		if (cache->table_memory[pos].address == old_node->address) { // hit like this shouldn't happen.
			fprintf(stderr, "impossible collision! two address==address lists! (transfer_node)\n");
			break;
		} else if (cache->table_memory[pos].address == 0) { // empty
			cache->table_memory[pos] = *old_node;
			break;
		} else {
			collisions++;
			pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);
		}
	} while (collisions <= cache->collision_allowance);

	if (collisions > cache->collision_allowance) {
		fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node)\n", sizeof(void*)*8);
	}
}

static void
expand_cache(remote_index_cache *cache)
{
	// keep old stats
	size_t old_node_capacity = cache->cache_node_capacity;
	remote_index_node *old_table = cache->table_memory;

	// quadruple the size (mirrors the in-process table's EXPAND_FACTOR growth)
	cache->cache_size <<= 2;
	cache->cache_node_capacity <<= 2;
	cache->collision_allowance += 3;
	cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));

	// repopulate (expensive!)
	size_t i;
	for (i = 0; i < old_node_capacity; i++) {
		if (old_table[i].address) {
			transfer_node(cache, &old_table[i]);
		}
	}
	free(old_table);
	// printf("cache expanded to %0.2f mb (eff: %3.0f%%, capacity: %lu, nodes: %llu, llnodes: %llu)\n", ((float)(cache->cache_size))/(1 << 20), ((float)(cache->cache_node_count)*100.0)/((float)(cache->cache_node_capacity)), cache->cache_node_capacity, cache->cache_node_count, cache->cache_llnode_count);
}

static void
insert_node(remote_index_cache *cache, uint64_t address, uint64_t index_file_offset)
{
	uint32_t collisions = 0;
	size_t pos = hash_index(address, cache->cache_node_capacity);
	size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);

	bool inserted = false;
	while (!inserted) {
		if (cache->table_memory[pos].address == 0ull || cache->table_memory[pos].address == address) { // hit or empty
			cache->table_memory[pos].address = address;
			cache->table_memory[pos].index_file_offset = index_file_offset;
			inserted = true;
			break;
		}

		collisions++;
		pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);

		if (collisions > cache->collision_allowance) {
			expand_cache(cache);
			pos = hash_index(address, cache->cache_node_capacity);
			multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
			collisions = 0;
		}
	}

}

static void
update_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	remote_index_cache *cache = descriptors->cache;

	// create from scratch if necessary.
	if (!cache) {
		descriptors->cache = cache = (remote_index_cache*)calloc((size_t)1, sizeof(remote_index_cache));
		cache->cache_node_capacity = 1 << 14;
		cache->collision_allowance = 17;
		cache->last_index_file_offset = 0;
		cache->cache_size = cache->cache_node_capacity*sizeof(remote_index_node);
		cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));

		// now map in the shared memory, if possible
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, descriptors->remote_pid, (size_t)PATH_MAX);

		int shmid = shm_open(shmem_name_string, O_RDWR, S_IRUSR | S_IWUSR);
		if (shmid >= 0) {
			cache->shmem = mmap(0, sizeof(stack_buffer_shared_memory), PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
			close(shmid);
		}

		if (shmid < 0 || cache->shmem == NULL) {
			// failed to connect to the shared memory region; warn and continue.
			_malloc_printf(ASL_LEVEL_INFO, "warning: unable to connect to remote process' shared memory; allocation histories may not be up-to-date.\n");
		}
	}

	// suspend and see how much updating there is to do. there are three scenarios, listed below
	bool update_snapshot = false;
	if (descriptors->remote_task != mach_task_self()) {
		task_suspend(descriptors->remote_task);
	}

	struct stat file_statistics;
	fstat(fileno(descriptors->index_file_stream), &file_statistics);
	size_t read_size = (descriptors->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	uint64_t read_this_update = 0;

	// computing delta_indecies is subtle; there are three cases:
	// 1. there is no shared memory (or we can't connect); diff the last_index_file_offset from the filesize.
	// 2. the only updates have been in shared memory; the disk file didn't change at all. delta_indecies should be zero, scan snapshot only.
	// 3. the updates have flushed to disk, meaning that most likely there is new data on disk that wasn't read from shared memory.
	//    correct delta_indecies for the pre-scanned amount and read the new data from disk and shmem.
	uint64_t delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
	uint32_t last_snapshot_scan_index = 0;
	if (delta_indecies && cache->shmem) {
		// case 3: add cache scanned to known from disk and recalc
		cache->last_index_file_offset += cache->snapshot.next_free_index_buffer_offset;
		delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
		update_snapshot = true;
	} else if (cache->shmem) {
		// case 2: set the last snapshot scan count so we don't rescan something we've seen.
		last_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
	}
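
	// Worked example of case 3 (hypothetical numbers): for a 32-bit target (16-byte index events),
	// suppose the file grew by 1600 bytes past last_index_file_offset, of which 320 bytes (20 events)
	// were already scanned via the previous shared-memory snapshot. Advancing last_index_file_offset
	// by those 320 bytes leaves delta_indecies == 80 fresh events to read from disk; the snapshot
	// itself is then re-copied below.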

	// no update necessary for the file; check whether we need a snapshot.
	if (delta_indecies == 0) {
		if (cache->shmem && !update_snapshot) {
			update_snapshot = (cache->shmem->next_free_index_buffer_offset != cache->snapshot.next_free_index_buffer_offset);
		}
	}

	// if a snapshot is necessary, memcpy from remote frozen process' memory
	// note: there were two ways to do this, a spin lock or suspension. suspension allows us to
	// analyze processes even if they were artificially suspended. with a lock, there'd be
	// worry that the target was suspended with the lock taken.
	if (update_snapshot) {
		memcpy(&cache->snapshot, cache->shmem, sizeof(stack_buffer_shared_memory));
		// also need to update our version of the remote uniquing table
		vm_address_t local_uniquing_address = 0ul;
		mach_msg_type_number_t local_uniquing_size = 0;
		mach_vm_size_t desired_size = round_page(sizeof(backtrace_uniquing_table));
		kern_return_t err;
		if ((err = mach_vm_read(descriptors->remote_task, cache->shmem->uniquing_table_address, desired_size, &local_uniquing_address, &local_uniquing_size)) != KERN_SUCCESS
		    || local_uniquing_size != desired_size) {
			fprintf(stderr, "error while attempting to mach_vm_read remote stack uniquing table (%d): %s\n", err, mach_error_string(err));
		} else {
			// the mach_vm_read was successful, so acquire the uniquing table

			// need to re-read the table, so deallocate the current memory
			if (cache->uniquing_table.table) mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)(cache->uniquing_table.table), cache->uniquing_table.tableSize);

			// the following line gathers the uniquing table structure data, but the actual table memory is invalid since it's a pointer from the
			// remote process. this pointer will be mapped shared in a few lines.
			cache->uniquing_table = *((backtrace_uniquing_table*)local_uniquing_address);

			vm_address_t local_table_address = 0ul;
			mach_msg_type_number_t local_table_size = 0;

			err = mach_vm_read(descriptors->remote_task, cache->uniquing_table.table_address, cache->uniquing_table.tableSize, &local_table_address, &local_table_size);
			if (err == KERN_SUCCESS) cache->uniquing_table.table = (mach_vm_address_t*)local_table_address;
			else cache->uniquing_table.table = NULL;

			mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)local_uniquing_address, (mach_vm_size_t)local_uniquing_size);
		}
	}

	// resume
	if (descriptors->remote_task != mach_task_self()) {
		task_resume(descriptors->remote_task);
	}

	if (!update_snapshot && delta_indecies == 0) return; // absolutely no updating needed.

	FILE *the_index = (descriptors->index_file_stream);

	// prepare for the read; target process could be 32 or 64 bit.

	stack_logging_index_event32 *target_32_index = NULL;
	stack_logging_index_event64 *target_64_index = NULL;

	// perform the update from the file
	uint32_t i;
	if (delta_indecies) {
		char bufferSpace[4096]; // 4 kb
		target_32_index = (stack_logging_index_event32*)bufferSpace;
		target_64_index = (stack_logging_index_event64*)bufferSpace;
		size_t number_slots = (size_t)(4096/read_size);

		size_t read_count = 0;
		if (fseeko(the_index, (off_t)(cache->last_index_file_offset), SEEK_SET)) {
			fprintf(stderr, "error while attempting to cache information from remote stack index file. (update_cache_for_file_streams)\n");
		}
		off_t current_index_position = cache->last_index_file_offset;
		do {
			number_slots = (size_t)MIN(delta_indecies - read_this_update, number_slots);
			read_count = fread(bufferSpace, read_size, number_slots, the_index);
			if (descriptors->task_is_64_bit) {
				for (i = 0; i < read_count; i++) {
					insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			} else {
				for (i = 0; i < read_count; i++) {
					insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			}
		} while (read_count);

		if (read_this_update < delta_indecies) {
			fprintf(stderr, "insufficient data in remote stack index file; expected more records.\n");
		}
		cache->last_index_file_offset += read_this_update * read_size;
	}

	if (update_snapshot) {
		target_32_index = (stack_logging_index_event32*)(cache->snapshot.index_buffer);
		target_64_index = (stack_logging_index_event64*)(cache->snapshot.index_buffer);

		uint32_t free_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
		off_t current_index_position = cache->snapshot.start_index_offset;
		if (descriptors->task_is_64_bit) {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		} else {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		}
	}
}

static void
destroy_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	if (descriptors->cache->shmem) {
		munmap(descriptors->cache->shmem, sizeof(stack_buffer_shared_memory));
	}
	free(descriptors->cache->table_memory);
	free(descriptors->cache);
	descriptors->cache = NULL;
}

#pragma mark - internal

// In the stack log analysis process, find the stack logging files for target process <pid>
// by scanning the temporary directory for directory entries with names of the form "stack-logs.<pid>."
// If we find such an entry, open the stack logging file it names.
static void
open_log_files(pid_t pid, remote_task_file_streams *this_task_streams)
{
	DIR *dp;
	struct dirent *entry;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];

	reap_orphaned_log_files(false); // reap any left-over log files (for non-existent processes, but not for this analysis process)
1218
1219 if ((dp = opendir(_PATH_TMP)) == NULL) {
1220 return;
1221 }
1222
1223 // It's OK to use snprintf in this routine since it should only be called by the clients
1224 // of stack logging, and thus calls to malloc are OK.
1225 snprintf(prefix_name, (size_t)PATH_MAX, "%s%d.", stack_log_file_base_name, pid); // make sure to use "%s%d." rather than just "%s%d" to match the whole pid
1226 size_t prefix_length = strlen(prefix_name);
1227
1228 while ( (entry = readdir(dp)) != NULL ) {
1229 if ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) {
1230 snprintf(pathname, (size_t)PATH_MAX, "%s%s", _PATH_TMP, entry->d_name);
1231 char reference_file[PATH_MAX];
1232 if (log_file_is_reference(pathname, reference_file, (size_t)PATH_MAX)) {
1233 this_task_streams->index_file_stream = fopen(reference_file, "r");
1234 } else {
1235 this_task_streams->index_file_stream = fopen(pathname, "r");
1236 }
1237
1238 break;
1239 }
1240 }
1241 closedir(dp);
1242 }
1243
1244 static remote_task_file_streams*
1245 retain_file_streams_for_task(task_t task)
1246 {
1247 if (task == MACH_PORT_NULL) return NULL;
1248
1249 OSSpinLockLock(&remote_fd_list_lock);
1250
1251 // see if they're already in use
1252 uint32_t i = 0;
1253 for (i = 0; i < remote_task_fd_count; i++) {
1254 if (remote_fds[i].remote_task == task) {
1255 remote_fds[i].in_use_count++;
1256 OSSpinLockUnlock(&remote_fd_list_lock);
1257 return &remote_fds[i];
1258 }
1259 }
1260
1261 // open them
1262 uint32_t failures = 0;
1263 if (remote_task_fd_count == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
1264 while (remote_fds[next_remote_task_fd].in_use_count > 0) {
1265 next_remote_task_fd++;
1266 if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
1267 failures++;
1268 if (failures >= STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
1269 OSSpinLockUnlock(&remote_fd_list_lock);
1270 return NULL;
1271 }
1272 }
1273 fclose(remote_fds[next_remote_task_fd].index_file_stream);
1274 destroy_cache_for_file_streams(&remote_fds[next_remote_task_fd]);
1275 }
1276
1277 pid_t pid;
1278 kern_return_t err = pid_for_task(task, &pid);
1279 if (err != KERN_SUCCESS) {
1280 OSSpinLockUnlock(&remote_fd_list_lock);
1281 return NULL;
1282 }
1283
1284 remote_task_file_streams *this_task_streams = &remote_fds[next_remote_task_fd];
1285
1286 open_log_files(pid, this_task_streams);
1287
1288 // check if opens failed
1289 if (this_task_streams->index_file_stream == NULL) {
1290 if (this_task_streams->index_file_stream) fclose(this_task_streams->index_file_stream);
1291 OSSpinLockUnlock(&remote_fd_list_lock);
1292 return NULL;
1293 }
1294
1295 // check if target pid is running 64-bit
1296 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
1297 struct kinfo_proc processInfo;
1298 size_t bufsize = sizeof(processInfo);
1299 if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, (size_t)0) == 0 && bufsize > 0) {
1300 this_task_streams->task_is_64_bit = processInfo.kp_proc.p_flag & P_LP64;
1301 } else {
1302 this_task_streams->task_is_64_bit = 0;
1303 }
1304
1305 // otherwise set vars and go
1306 this_task_streams->in_use_count = 1;
1307 this_task_streams->remote_task = task;
1308 this_task_streams->remote_pid = pid;
1309 next_remote_task_fd++;
1310 if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
1311 remote_task_fd_count = MIN(remote_task_fd_count + 1, STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED);
1312
1313 OSSpinLockUnlock(&remote_fd_list_lock);
1314 return this_task_streams;
1315 }
1316
1317 static void
1318 release_file_streams_for_task(task_t task)
1319 {
1320 OSSpinLockLock(&remote_fd_list_lock);
1321
1322 // decrement in-use count
1323 uint32_t i = 0;
1324 for (i = 0; i < remote_task_fd_count; i++) {
1325 if (remote_fds[i].remote_task == task) {
1326 remote_fds[i].in_use_count--;
1327 break;
1328 }
1329 }
1330
1331 OSSpinLockUnlock(&remote_fd_list_lock);
1332 }
1333
1334 #pragma mark - extern
1335
1336 kern_return_t
1337 __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
1338 {
1339 remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
1340 if (remote_fd == NULL) {
1341 return KERN_FAILURE;
1342 }
1343
1344 update_cache_for_file_streams(remote_fd);
1345
1346 uint32_t collisions = 0;
1347 size_t hash = hash_index(address, remote_fd->cache->cache_node_capacity);
1348 size_t multiplier = hash_multiplier(remote_fd->cache->cache_node_capacity, remote_fd->cache->collision_allowance);
1349 uint64_t located_file_position = 0;
1350
1351 bool found = false;
1352 do {
1353 if (remote_fd->cache->table_memory[hash].address == address) { // hit!
1354 located_file_position = remote_fd->cache->table_memory[hash].index_file_offset;
1355 found = true;
1356 break;
1357 } else if (remote_fd->cache->table_memory[hash].address == 0ull) { // failure!
1358 break;
1359 }
1360
1361 collisions++;
1362 hash = next_hash(hash, multiplier, remote_fd->cache->cache_node_capacity, collisions);
1363
1364 } while (collisions <= remote_fd->cache->collision_allowance);
1365
	if (found) {
		// prepare for the read; the target process could be 32- or 64-bit
		stack_logging_index_event32 *target_32_index = NULL;
		stack_logging_index_event64 *target_64_index = NULL;

		if (located_file_position >= remote_fd->cache->last_index_file_offset) {
			// not yet flushed to disk; it must still be in the shared-memory snapshot
			if (remote_fd->cache->shmem) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}

		} else {
			// it's been written to disk
			char bufferSpace[128];

			size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
			fseeko(remote_fd->index_file_stream, (off_t)located_file_position, SEEK_SET);
			size_t read_count = fread(bufferSpace, read_size, (size_t)1, remote_fd->index_file_stream);
			if (read_count) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}
		}
	}

	release_file_streams_for_task(task);

	if (!found) {
		return KERN_FAILURE;
	}

	return __mach_stack_logging_frames_for_uniqued_stack(task, located_file_position, stack_frames_buffer, max_stack_frames, count);
}
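
/*
 * Example (illustrative sketch only, not part of this file's API surface):
 * a tool holding a send right to the target task could fetch the recorded
 * backtrace for a block at `addr` roughly like so:
 *
 *	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
 *	uint32_t frame_count = 0;
 *	if (__mach_stack_logging_get_frames(task, addr, frames, STACK_LOGGING_MAX_STACK_SIZE, &frame_count) == KERN_SUCCESS) {
 *		// symbolicate frames[0..frame_count-1] as desired
 *	}
 */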


kern_return_t
__mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}

	bool reading_all_addresses = (address == 0);
	mach_stack_logging_record_t pass_record;
	kern_return_t err = KERN_SUCCESS;

	// update the cache (read the index file once and only once)
	update_cache_for_file_streams(remote_fd);

	FILE *the_index = (remote_fd->index_file_stream);

	// prepare for the read; the target process could be 32- or 64-bit
	char bufferSpace[2048]; // 2 KB scratch buffer for batched index reads
	stack_logging_index_event32 *target_32_index = (stack_logging_index_event32*)bufferSpace;
	stack_logging_index_event64 *target_64_index = (stack_logging_index_event64*)bufferSpace;
	uint32_t target_addr_32 = (uint32_t)STACK_LOGGING_DISGUISE((uint32_t)address);
	uint64_t target_addr_64 = STACK_LOGGING_DISGUISE((uint64_t)address);
	size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	size_t number_slots = sizeof(bufferSpace)/read_size;
	uint64_t total_slots = remote_fd->cache->last_index_file_offset / read_size;

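	// note: the disguised target computed above compares directly against the
	// stored (still-disguised) addresses; STACK_LOGGING_DISGUISE is applied
	// again below to recover the real address for the enumerator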
	// perform the search
	size_t read_count = 0;
	int64_t current_file_offset = 0;
	uint32_t i;
	do {
		// read index events from the file until it's necessary to grab them from the shared-memory snapshot,
		// cropping the file reads to what had already been flushed when we entered this function
		number_slots = (size_t)MIN(number_slots, total_slots);

		// if we've run out of file to read (as of the time we entered this function), try the shared-memory snapshot
		if (number_slots == 0) {
			if (remote_fd->cache->shmem && remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset > (uint64_t)current_file_offset) {
				// use shared memory
				target_32_index = (stack_logging_index_event32*)remote_fd->cache->snapshot.index_buffer;
				target_64_index = (stack_logging_index_event64*)remote_fd->cache->snapshot.index_buffer;
				read_count = (uint32_t)(remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset - current_file_offset) / read_size;
				current_file_offset += read_count * read_size;
			} else {
				break;
			}
		} else {
			// read and buffer the index events, since the enumerator could modify the file as we go
			fseeko(the_index, current_file_offset, SEEK_SET);
			read_count = fread(bufferSpace, read_size, number_slots, the_index);
			current_file_offset = ftello(the_index);
			total_slots -= read_count;
		}

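		// hand each buffered event to the enumerator, filtering on the target
		// address unless we're enumerating every record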
		if (remote_fd->task_is_64_bit) {
			for (i = 0; i < read_count; i++) {
				if (reading_all_addresses || target_64_index[i].address == target_addr_64) {
					pass_record.address = STACK_LOGGING_DISGUISE(target_64_index[i].address);
					pass_record.argument = target_64_index[i].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_64_index[i].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_64_index[i].offset_and_flags);
					enumerator(pass_record, context);
				}
			}
		} else {
			for (i = 0; i < read_count; i++) {
				if (reading_all_addresses || target_32_index[i].address == target_addr_32) {
					pass_record.address = STACK_LOGGING_DISGUISE(target_32_index[i].address);
					pass_record.argument = target_32_index[i].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_32_index[i].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_32_index[i].offset_and_flags);
					enumerator(pass_record, context);
				}
			}
		}
	} while (read_count);

	release_file_streams_for_task(task);
	return err;
}
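
/*
 * Example (illustrative sketch only): passing address == 0 enumerates every
 * logged event for the task, e.g. to count them. The callback name here is
 * hypothetical.
 *
 *	static void
 *	count_event(mach_stack_logging_record_t record, void *context)
 *	{
 *		(*(uint64_t *)context)++;
 *	}
 *
 *	uint64_t total = 0;
 *	kern_return_t err = __mach_stack_logging_enumerate_records(task, 0, count_event, &total);
 */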


kern_return_t
__mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
	if (remote_fd == NULL) return KERN_FAILURE;

	__unwind_stack_from_table_index(&remote_fd->cache->uniquing_table, stack_identifier, stack_frames_buffer, count, max_stack_frames);

	release_file_streams_for_task(task);

	if (*count) return KERN_SUCCESS;
	else return KERN_FAILURE;
}
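
// (__mach_stack_logging_get_frames() above funnels into this function: the
// uniqued stack identifier found in the index is resolved against the remote
// task's uniquing table, and an empty unwind reports KERN_FAILURE)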


#ifdef TEST_DISK_STACK_LOGGING

// cc -o stack_logging_disk stack_logging_disk.c -DTEST_DISK_STACK_LOGGING

#include <sys/wait.h>

int
main()
{
	int status;
	int i;
	size_t total_globals = 0ul;

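	// audit the static footprint of the logging globals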
	fprintf(stderr, "master test process is %d\n", getpid());
	fprintf(stderr, "sizeof pre_write_buffers: %lu\n", sizeof(pre_write_buffers)); total_globals += sizeof(pre_write_buffers);
	fprintf(stderr, "sizeof stack_buffer: %lu\n", sizeof(stack_buffer)); total_globals += sizeof(stack_buffer);
	fprintf(stderr, "sizeof last_logged_malloc_address: %lu\n", sizeof(last_logged_malloc_address)); total_globals += sizeof(last_logged_malloc_address);
	fprintf(stderr, "sizeof stack_log_file_base_name: %lu\n", sizeof(stack_log_file_base_name)); total_globals += sizeof(stack_log_file_base_name);
	fprintf(stderr, "sizeof stack_log_file_suffix: %lu\n", sizeof(stack_log_file_suffix)); total_globals += sizeof(stack_log_file_suffix);
	fprintf(stderr, "sizeof stack_log_link_suffix: %lu\n", sizeof(stack_log_link_suffix)); total_globals += sizeof(stack_log_link_suffix);
	fprintf(stderr, "sizeof stack_log_location: %lu\n", sizeof(stack_log_location)); total_globals += sizeof(stack_log_location);
	fprintf(stderr, "sizeof stack_log_reference_file: %lu\n", sizeof(stack_log_reference_file)); total_globals += sizeof(stack_log_reference_file);
	fprintf(stderr, "sizeof index_file_path: %lu\n", sizeof(index_file_path)); total_globals += sizeof(index_file_path);
	fprintf(stderr, "sizeof index_file_descriptor: %lu\n", sizeof(index_file_descriptor)); total_globals += sizeof(index_file_descriptor);
	fprintf(stderr, "sizeof remote_fds: %lu\n", sizeof(remote_fds)); total_globals += sizeof(remote_fds);
	fprintf(stderr, "sizeof next_remote_task_fd: %lu\n", sizeof(next_remote_task_fd)); total_globals += sizeof(next_remote_task_fd);
	fprintf(stderr, "sizeof remote_task_fd_count: %lu\n", sizeof(remote_task_fd_count)); total_globals += sizeof(remote_task_fd_count);
	fprintf(stderr, "sizeof remote_fd_list_lock: %lu\n", sizeof(remote_fd_list_lock)); total_globals += sizeof(remote_fd_list_lock);
	fprintf(stderr, "sizeof logging_use_compaction: %lu\n", sizeof(logging_use_compaction)); total_globals += sizeof(logging_use_compaction);

	fprintf(stderr, "size of all global data: %lu\n", total_globals);

	create_log_file();

	// create a few child processes and exit them cleanly, so that their logs get cleaned up
	fprintf(stderr, "\ncreating child processes and exiting cleanly\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_file();
			fprintf(stderr, "exiting child process %d\n", getpid());
			exit(1);
		}
		wait(&status);
	}

	// create a few child processes and abruptly _exit() them, leaving their logs around
	fprintf(stderr, "\ncreating child processes and exiting abruptly, leaving logs around\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_file();
			fprintf(stderr, "exiting child process %d\n", getpid());
			_exit(1);
		}
		wait(&status);
	}

	// this should reap any remaining logs
	fprintf(stderr, "\nexiting master test process %d\n", getpid());
	delete_log_files();
	return 0;
}

#endif