1 /*
2 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <limits.h>
28 #include <unistd.h>
29 #include <fcntl.h>
30 #include <dirent.h>
31 #include <libkern/OSAtomic.h>
32 #include <mach/mach.h>
33 #include <mach/mach_vm.h>
34 #include <sys/sysctl.h>
35 #include <sys/stat.h>
36 #include <sys/mman.h>
37 #include <pthread.h>
38 #include <paths.h>
39 #include <errno.h>
40 #include "stack_logging.h"
41 #include "malloc_printf.h"
42 #include "_simple.h" // as included by malloc.c, this defines ASL_LEVEL_INFO
43
44 #pragma mark -
45 #pragma mark Defines
46
47 #ifdef TEST_DISK_STACK_LOGGING
48 #define _malloc_printf fprintf
49 #undef ASL_LEVEL_INFO
50 #define ASL_LEVEL_INFO stderr
51 #endif
52
53 #define STACK_LOGGING_MAX_STACK_SIZE 512
54 #define STACK_LOGGING_BLOCK_WRITING_SIZE 8192
55 #define STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED 3
56
57 #define BACKTRACE_UNIQUING_DEBUG 0
58
59 // The expansion factor controls the shifting up of table size. A factor of 1 will double the size upon expanding,
60 // 2 will quadruple the size, etc. Maintaining a 66% fill in an ideal table requires the collision allowance to
61 // increase by 3 for every quadrupling of the table size (although this is also the constant applied to insertion
62 // performance, O(c*n)).
63 #define EXPAND_FACTOR 2
64 #define COLLISION_GROWTH_RATE 3
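// Worked example with the values used below: EXPAND_FACTOR 2 and COLLISION_GROWTH_RATE 3 mean each
// call to __expand_uniquing_table() grows numPages 256 -> 1024 -> 4096 (4x per expansion) while
// max_collide steps 19 -> 22 -> 25.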
65
66 // For a uniquing table, the useful node size is slots := floor(table_byte_size / (2 * sizeof(mach_vm_address_t)))
67 // Some useful numbers for the initial max collision value (desiring 66% fill):
68 // 16K-23K slots -> 16 collisions
69 // 24K-31K slots -> 17 collisions
70 // 32K-47K slots -> 18 collisions
71 // 48K-79K slots -> 19 collisions
72 // 80K-96K slots -> 20 collisions
73 #define INITIAL_MAX_COLLIDE 19
74 #define DEFAULT_UNIQUING_PAGE_SIZE 256
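// Worked example, assuming the common 4KB vm_page_size: DEFAULT_UNIQUING_PAGE_SIZE of 256 pages
// gives a 1MB table, so slots = floor((256 * 4096) / (2 * 8)) = 65536 (64K), which falls in the
// 48K-79K band above and matches INITIAL_MAX_COLLIDE of 19.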
75
76 #pragma mark -
77 #pragma mark Macros
78
79 #define STACK_LOGGING_FLAGS(longlongvar) (uint8_t)((uint64_t)(longlongvar) >> 56)
80 #define STACK_LOGGING_OFFSET(longlongvar) ((longlongvar) & 0x00FFFFFFFFFFFFFFull)
81 #define STACK_LOGGING_OFFSET_AND_FLAGS(longlongvar, realshortvar) (((uint64_t)(longlongvar) & 0x00FFFFFFFFFFFFFFull) | ((uint64_t)(realshortvar) << 56))
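// Round-trip example: STACK_LOGGING_OFFSET_AND_FLAGS(offset, flags) packs the 8 flag bits into the
// top byte above the 56-bit offset; STACK_LOGGING_FLAGS() and STACK_LOGGING_OFFSET() then recover
// the two halves intact, so file offsets are limited to 2^56 - 1.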
82
83 #pragma mark -
84 #pragma mark Types
85
86 typedef struct {
87 uintptr_t argument;
88 uintptr_t address;
89 uint64_t offset_and_flags; // top 8 bits are actually the flags!
90 } stack_logging_index_event;
91
92 typedef struct {
93 uint32_t argument;
94 uint32_t address;
95 uint64_t offset_and_flags; // top 8 bits are actually the flags!
96 } stack_logging_index_event32;
97
98 typedef struct {
99 uint64_t argument;
100 uint64_t address;
101 uint64_t offset_and_flags; // top 8 bits are actually the flags!
102 } stack_logging_index_event64;
103
104 #pragma pack(push,4)
105 typedef struct {
106 uint64_t numPages; // number of pages of the table
107 uint64_t numNodes;
108 uint64_t tableSize;
109 uint64_t untouchableNodes;
110 mach_vm_address_t table_address;
111 int32_t max_collide;
112 // 'table_address' is just an always 64-bit version of the pointer-sized 'table' field to remotely read;
113 // it's important that the offset of 'table_address' in the struct does not change between 32 and 64-bit.
114 #if BACKTRACE_UNIQUING_DEBUG
115 uint64_t nodesFull;
116 uint64_t backtracesContained;
117 #endif
118 mach_vm_address_t *table; // allocated using vm_allocate()
119 } backtrace_uniquing_table;
120 #pragma pack(pop)
121
122 // for storing/looking up allocations that haven't yet been written to disk; consistent size across 32/64-bit processes.
123 // It's important that these fields don't change alignment due to the architecture because they may be accessed from an
124 // analyzing process with a different arch - hence the pragmas.
125 #pragma pack(push,4)
126 typedef struct {
127 uint64_t start_index_offset;
128 uint32_t next_free_index_buffer_offset;
129 mach_vm_address_t uniquing_table_address;
130 char index_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
131 backtrace_uniquing_table *uniquing_table;
132 } stack_buffer_shared_memory;
133 #pragma pack(pop)
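// With pack(4) there is no padding, so the field offsets an analyzing process reads remotely are
// the same in 32- and 64-bit processes: start_index_offset at 0, next_free_index_buffer_offset at 8,
// uniquing_table_address at 12, index_buffer at 20; only the trailing process-local 'uniquing_table'
// pointer differs in size between architectures.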
134
135 // target process address -> record table (for __mach_stack_logging_get_frames)
136 typedef struct {
137 uint64_t address;
138 uint64_t index_file_offset;
139 } remote_index_node;
140
141 // for caching index information client-side:
142 typedef struct {
143 size_t cache_size;
144 size_t cache_node_capacity;
145 uint32_t collision_allowance;
146 remote_index_node *table_memory; // this can be malloced; it's on the client side.
147 stack_buffer_shared_memory *shmem; // shared memory
148 stack_buffer_shared_memory snapshot; // memory snapshot of the remote process' shared memory
149 uint32_t last_pre_written_index_size;
150 uint64_t last_index_file_offset;
151 backtrace_uniquing_table uniquing_table; // snapshot of the remote process' uniquing table
152 } remote_index_cache;
153
154 // for reading stack history information from remote processes:
155 typedef struct {
156 task_t remote_task;
157 pid_t remote_pid;
158 int32_t task_is_64_bit;
159 int32_t in_use_count;
160 FILE *index_file_stream;
161 remote_index_cache *cache;
162 } remote_task_file_streams;
163
164 #pragma mark -
165 #pragma mark Constants/Globals
166
167 static OSSpinLock stack_logging_lock = OS_SPINLOCK_INIT;
168
169 // support for multi-threaded forks
170 extern void __stack_logging_fork_prepare();
171 extern void __stack_logging_fork_parent();
172 extern void __stack_logging_fork_child();
173
174 // support for gdb and others checking for stack_logging locks
175 __private_extern__ boolean_t __stack_logging_locked();
176
177 // single-thread access variables
178 static stack_buffer_shared_memory *pre_write_buffers;
179 static vm_address_t *stack_buffer;
180 static uintptr_t last_logged_malloc_address = 0;
181
182 // Constants to define stack logging file path names.
183 // Files will get written as /tmp/stack-logs.<pid>.<progname>.XXXXXX.index
184 // unless the base directory is specified otherwise with MallocStackLoggingDirectory.
185 // In the latter case, a reference file /tmp/stack-logs.<pid>.<progname>.XXXXXX.link will also be created.
186 static const char *stack_log_file_base_name = "stack-logs.";
187 static const char *stack_log_file_suffix = ".index";
188 static const char *stack_log_link_suffix = ".link";
189
190 static char stack_log_location[PATH_MAX];
191 static char stack_log_reference_file[PATH_MAX];
192 static char index_file_path[PATH_MAX];
193 static int index_file_descriptor = -1;
194
195 // for accessing remote log files
196 static remote_task_file_streams remote_fds[STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED];
197 static uint32_t next_remote_task_fd = 0;
198 static uint32_t remote_task_fd_count = 0;
199 static OSSpinLock remote_fd_list_lock = OS_SPINLOCK_INIT;
200
201 // activation variables
202 static int logging_use_compaction = 1; // set this to zero to always disable compaction.
203
204 // We set malloc_logger to NULL to disable logging if we encounter errors
205 // during file writing
206 typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
207 extern malloc_logger_t *malloc_logger;
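// This typedef matches the signature of __disk_stack_logging_log_stack() below; the wiring that
// points malloc_logger at it lives in malloc.c, not in this file. Roughly, a malloc() of 'size'
// returning 'ptr' reaches the logger as (stack_logging_type_alloc, zone, size, 0, ptr, 0).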
208
209 #pragma mark -
210 #pragma mark In-Memory Backtrace Uniquing
211
212 static __attribute__((always_inline))
213 inline void*
214 allocate_pages(uint64_t memSize)
215 {
216 mach_vm_address_t allocatedMem = 0ull;
217 if (mach_vm_allocate(mach_task_self(), &allocatedMem, memSize, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL)) != KERN_SUCCESS) {
218 malloc_printf("allocate_pages(): virtual memory exhaused!\n");
219 }
220 return (void*)(uintptr_t)allocatedMem;
221 }
222
223 static __attribute__((always_inline))
224 inline int
225 deallocate_pages(void* memPointer, uint64_t memSize)
226 {
227 return mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)memPointer, memSize);
228 }
229
230 static backtrace_uniquing_table*
231 __create_uniquing_table(void)
232 {
233 backtrace_uniquing_table *uniquing_table = (backtrace_uniquing_table*)allocate_pages((uint64_t)round_page(sizeof(backtrace_uniquing_table)));
234 if (!uniquing_table) return NULL;
235 bzero(uniquing_table, sizeof(backtrace_uniquing_table));
236 uniquing_table->numPages = DEFAULT_UNIQUING_PAGE_SIZE;
237 uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
238 uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1; // make sure it's even.
239 uniquing_table->table = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);
240 uniquing_table->table_address = (uintptr_t)uniquing_table->table;
241 uniquing_table->max_collide = INITIAL_MAX_COLLIDE;
242 uniquing_table->untouchableNodes = 0;
243
244 #if BACKTRACE_UNIQUING_DEBUG
245 malloc_printf("create_uniquing_table(): creating. size: %lldKB == %lldMB, numnodes: %lld (%lld untouchable)\n", uniquing_table->tableSize >> 10, uniquing_table->tableSize >> 20, uniquing_table->numNodes, uniquing_table->untouchableNodes);
246 malloc_printf("create_uniquing_table(): table: %p; end: %p\n", uniquing_table->table, (void*)((uintptr_t)uniquing_table->table + (uintptr_t)uniquing_table->tableSize));
247 #endif
248 return uniquing_table;
249 }
250
251 static void
252 __expand_uniquing_table(backtrace_uniquing_table *uniquing_table)
253 {
254 mach_vm_address_t *oldTable = uniquing_table->table;
255 uint64_t oldsize = uniquing_table->tableSize;
256 uint64_t oldnumnodes = uniquing_table->numNodes;
257
258 uniquing_table->numPages = uniquing_table->numPages << EXPAND_FACTOR;
259 uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
260 uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1; // make sure it's even.
261 mach_vm_address_t *newTable = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);
262
263 uniquing_table->table = newTable;
264 uniquing_table->table_address = (uintptr_t)uniquing_table->table;
265 uniquing_table->max_collide = uniquing_table->max_collide + COLLISION_GROWTH_RATE;
266
267 if (mach_vm_copy(mach_task_self(), (mach_vm_address_t)(uintptr_t)oldTable, oldsize, (mach_vm_address_t)(uintptr_t)newTable) != KERN_SUCCESS) {
268 malloc_printf("expandUniquingTable(): VMCopyFailed\n");
269 }
270 uniquing_table->untouchableNodes = oldnumnodes;
271
272 #if BACKTRACE_UNIQUING_DEBUG
273 malloc_printf("expandUniquingTable(): expanded from nodes full: %lld of: %lld (~%2d%%); to nodes: %lld (inactive = %lld); unique bts: %lld\n",
274 uniquing_table->nodesFull, oldnumnodes, (int)(((uniquing_table->nodesFull * 100.0) / (double)oldnumnodes) + 0.5),
275 uniquing_table->numNodes, uniquing_table->untouchableNodes, uniquing_table->backtracesContained);
276 malloc_printf("expandUniquingTable(): allocate: %p; end: %p\n", newTable, (void*)((uintptr_t)newTable + (uintptr_t)(uniquing_table->tableSize)));
277 malloc_printf("expandUniquingTable(): deallocate: %p; end: %p\n", oldTable, (void*)((uintptr_t)oldTable + (uintptr_t)oldsize));
278 #endif
279
280 if (deallocate_pages(oldTable, oldsize) != KERN_SUCCESS) {
281 malloc_printf("expandUniquingTable(): mach_vm_deallocate failed. [%p]\n", uniquing_table->table);
282 }
283 }
284
285 static int
286 __enter_frames_in_table(backtrace_uniquing_table *uniquing_table, uint64_t *foundIndex, mach_vm_address_t *frames, int32_t count)
287 {
288 mach_vm_address_t thisPC;
289 uint64_t hash, uParent = (uint64_t)(-1ll), modulus = (uniquing_table->numNodes-uniquing_table->untouchableNodes-1);
290 int32_t collisions, lcopy = count, returnVal = 1;
291 uint64_t hash_multiplier = ((uniquing_table->numNodes - uniquing_table->untouchableNodes)/(uniquing_table->max_collide*2+1));
292 mach_vm_address_t *node;
293 while (--lcopy >= 0) {
294 thisPC = frames[lcopy];
295
296 // hash = initialHash(uniquing_table, uParent, thisPC);
297 hash = uniquing_table->untouchableNodes + (((uParent << 4) ^ (thisPC >> 2)) % modulus);
298 collisions = uniquing_table->max_collide;
299
300 while (collisions--) {
301 node = uniquing_table->table + (hash * 2);
302
303 if (*node == 0 && node[1] == 0) {
304 // blank; store this entry!
305 // Note that we need to test for both node[0] and node[1] as (0, -1) is a valid entry
306 node[0] = thisPC;
307 node[1] = uParent;
308 uParent = hash;
309 #if BACKTRACE_UNIQUING_DEBUG
310 uniquing_table->nodesFull++;
311 if (lcopy == 0) {
312 uniquing_table->backtracesContained++;
313 }
314 #endif
315 break;
316 }
317 if (*node == thisPC && node[1] == uParent) {
318 // hit! retrieve index and go.
319 uParent = hash;
320 break;
321 }
322
323 hash += collisions * hash_multiplier + 1;
324
325 if (hash >= uniquing_table->numNodes) {
326 hash -= (uniquing_table->numNodes - uniquing_table->untouchableNodes); // wrap around.
327 }
328 }
329
330 if (collisions < 0) {
331 returnVal = 0;
332 break;
333 }
334 }
335
336 if (returnVal) *foundIndex = uParent;
337
338 return returnVal;
339 }
340
341 static void
342 __unwind_stack_from_table_index(backtrace_uniquing_table *uniquing_table, uint64_t index_pos, mach_vm_address_t *out_frames_buffer, uint32_t *out_frames_count, uint32_t max_frames)
343 {
344 mach_vm_address_t *node = uniquing_table->table + (index_pos * 2);
345 uint32_t foundFrames = 0;
346 if (index_pos < uniquing_table->numNodes) {
347 while (foundFrames < max_frames) {
348 out_frames_buffer[foundFrames++] = node[0];
349 if (node[1] == (mach_vm_address_t)(-1ll)) break;
350 node = uniquing_table->table + (node[1] * 2);
351 }
352 }
353
354 *out_frames_count = foundFrames;
355 }
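/*
 * A minimal, non-compiled sketch (hence the #if 0; the function name is hypothetical) of how the
 * two routines above round-trip: frames are entered coldest-first, and unwinding the returned
 * index walks the parent chain back out hottest-first.
 */
#if 0
static void
example_uniquing_round_trip(void)
{
	backtrace_uniquing_table *table = __create_uniquing_table();
	mach_vm_address_t frames[3] = {0x1000, 0x2000, 0x3000}; // hottest PC first, as thread_stack_pcs() returns them
	uint64_t stack_id = (uint64_t)(-1ll);
	while (!__enter_frames_in_table(table, &stack_id, frames, 3)) {
		__expand_uniquing_table(table); // table too full to insert; grow it and retry
	}
	mach_vm_address_t out[3];
	uint32_t out_count = 0;
	__unwind_stack_from_table_index(table, stack_id, out, &out_count, 3);
	// out[0..out_count) again holds {0x1000, 0x2000, 0x3000}, hottest frame first.
}
#endif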
356
357 #pragma mark -
358 #pragma mark Disk Stack Logging
359
360 static void delete_log_files(void); // pre-declare
361 static int delete_logging_file(char *log_location);
362
363 static void
364 append_int(char * filename, pid_t pid, size_t maxLength)
365 {
366 size_t len = strlen(filename);
367
368 uint32_t count = 0;
369 pid_t value = pid;
370 while (value > 0) {
371 value /= 10;
372 count++;
373 }
374
375 if (len + count >= maxLength) return; // don't modify the string if it would violate maxLength
376
377 filename[len + count] = '\0';
378
379 value = pid;
380 uint32_t i;
381 for (i = 0 ; i < count ; i ++) {
382 filename[len + count - 1 - i] = '0' + value % 10;
383 value /= 10;
384 }
385 }
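// e.g. append_int() on a buffer holding "stack-logs." with pid 12345 yields "stack-logs.12345";
// it is hand-rolled so that building file names never goes through snprintf(), which can malloc()
// (see the warning in create_log_file() below).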
386
387 // If successful, returns path to log file that was created. Otherwise returns NULL.
388 static char *
389 create_log_file(void)
390 {
391 pid_t pid = getpid();
392 const char *progname = getprogname();
393 char *created_log_location = NULL;
394
395 // WARNING! use of snprintf can induce malloc() calls
396 bool use_alternate_location = false;
397 char *evn_log_directory = getenv("MallocStackLoggingDirectory");
398 if (evn_log_directory && *evn_log_directory) {
399 use_alternate_location = true;
400 strlcpy(stack_log_location, evn_log_directory, (size_t)PATH_MAX);
401 size_t evn_log_len = strlen(stack_log_location);
402 // add the '/' only if it's not already there.
403 if (evn_log_directory[evn_log_len-1] != '/') {
404 strlcat(stack_log_location, "/", (size_t)PATH_MAX);
405 }
406 } else {
407 strlcpy(stack_log_location, _PATH_TMP, (size_t)PATH_MAX);
408 }
409
410 strlcat(stack_log_location, stack_log_file_base_name, (size_t)PATH_MAX);
411 append_int(stack_log_location, pid, (size_t)PATH_MAX);
412 if (progname && progname[0] != '\0') {
413 strlcat(stack_log_location, ".", (size_t)PATH_MAX);
414 strlcat(stack_log_location, progname, (size_t)PATH_MAX);
415 }
416 if (!use_alternate_location) strlcat(stack_log_location, ".XXXXXX", (size_t)PATH_MAX);
417 strlcat(stack_log_location, stack_log_file_suffix, (size_t)PATH_MAX);
418
419 // in the case where the user has specified an alternate location, drop a reference file
420 // in /tmp with the suffix 'stack_log_link_suffix' (".link") and save the path of the
421 // stack logging file there.
422 if (use_alternate_location) {
423 strlcpy(stack_log_reference_file, _PATH_TMP, (size_t)PATH_MAX);
424 strlcat(stack_log_reference_file, stack_log_file_base_name, (size_t)PATH_MAX);
425 append_int(stack_log_reference_file, pid, (size_t)PATH_MAX);
426 if (progname && progname[0] != '\0') {
427 strlcat(stack_log_reference_file, ".", (size_t)PATH_MAX);
428 strlcat(stack_log_reference_file, progname, (size_t)PATH_MAX);
429 }
430 strlcat(stack_log_reference_file, ".XXXXXX", (size_t)PATH_MAX);
431 strlcat(stack_log_reference_file, stack_log_link_suffix, (size_t)PATH_MAX);
432
433 int link_file_descriptor = mkstemps(stack_log_reference_file, (int)strlen(stack_log_link_suffix));
434 if (link_file_descriptor == -1) {
435 _malloc_printf(ASL_LEVEL_INFO, "unable to create stack reference file at %s\n", stack_log_location);
436 return NULL;
437 }
438 ssize_t written = write(link_file_descriptor, stack_log_location, strlen(stack_log_location));
439 if (written < (ssize_t)strlen(stack_log_location)) {
440 _malloc_printf(ASL_LEVEL_INFO, "unable to write to stack reference file at %s\n", stack_log_reference_file);
441 close(link_file_descriptor); return NULL; // close first so this error path doesn't leak the descriptor
442 }
443 const char *description_string = "\n(This is a reference file to the stack logs at the path above.)\n";
444 write(link_file_descriptor, description_string, strlen(description_string));
445 close(link_file_descriptor);
446 }
447
448 // Securely create the log file.
449 if ((index_file_descriptor = mkstemps(stack_log_location, (int)strlen(stack_log_file_suffix))) != -1) {
450 _malloc_printf(ASL_LEVEL_INFO, "stack logs being written into %s\n", stack_log_location);
451 created_log_location = stack_log_location;
452 } else {
453 _malloc_printf(ASL_LEVEL_INFO, "unable to create stack logs at %s\n", stack_log_location);
454 if (use_alternate_location) delete_logging_file(stack_log_reference_file);
455 stack_log_reference_file[0] = '\0';
456 stack_log_location[0] = '\0';
457 created_log_location = NULL;
458 }
459 return created_log_location;
460 }
461
462 // Check to see if the log file is actually a reference to another location
463 static int
464 log_file_is_reference(char *log_location, char *out_reference_loc_buffer, size_t max_reference_path_size)
465 {
466 if (log_location == NULL || log_location[0] == '\0') return 0;
467
468 size_t log_len = strlen(log_location);
469 size_t link_suffix_len = strlen(stack_log_link_suffix);
470 if (log_len < link_suffix_len || strncmp(log_location+log_len-link_suffix_len, stack_log_link_suffix, link_suffix_len) != 0) {
471 // not a reference file.
472 return 0;
473 }
474
475 if (!out_reference_loc_buffer || max_reference_path_size == 0) return 1;
476
477 FILE *reference_file = fopen(log_location, "r");
478 if (reference_file == NULL) {
479 // if unable to open the file, it may be because another user created it; no need to warn.
480 out_reference_loc_buffer[0] = '\0';
481 return 1;
482 }
483
484 char *ret = fgets(out_reference_loc_buffer, (int)max_reference_path_size, reference_file);
485 if (!ret) {
486 out_reference_loc_buffer[0] = '\0';
487 _malloc_printf(ASL_LEVEL_INFO, "unable to read from stack logging reference file at %s\n", log_location);
488 return 1;
489 } else {
490 size_t read_line_len = strlen(out_reference_loc_buffer);
491 if (read_line_len >= 1 && out_reference_loc_buffer[read_line_len-1] == '\n') {
492 out_reference_loc_buffer[read_line_len-1] = '\0';
493 }
494 }
495
496 fclose(reference_file);
497
498 return 1;
499 }
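// For example (names illustrative): /tmp/stack-logs.123.Foo.ab12cd.link is a reference file whose
// first line, written by create_log_file() above, is the full path of the real .index log file.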
500
501 // This function may be called from the target process when exiting, or from either the target process or
502 // a stack log analysis process when reaping orphaned stack log files.
503 // Returns -1 if the files exist and they couldn't be removed, returns 0 otherwise.
504 static int
505 delete_logging_file(char *log_location)
506 {
507 if (log_location == NULL || log_location[0] == '\0') return 0;
508
509 struct stat statbuf;
510 if (unlink(log_location) != 0 && stat(log_location, &statbuf) == 0) {
511 return -1;
512 }
513 return 0;
514 }
515
516 // This function will be called from atexit() in the target process.
517 static void
518 delete_log_files(void)
519 {
520 if (stack_log_location && stack_log_location[0]) {
521 if (delete_logging_file(stack_log_location) == 0) {
522 _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", stack_log_location);
523 index_file_path[0] = '\0';
524 } else {
525 _malloc_printf(ASL_LEVEL_INFO, "unable to delete stack logs from %s\n", stack_log_location);
526 }
527 }
528 if (stack_log_reference_file && stack_log_reference_file[0]) {
529 delete_logging_file(stack_log_reference_file);
530 }
531 }
532
533 static bool
534 is_process_running(pid_t pid)
535 {
536 struct kinfo_proc kpt[1];
537 size_t size = sizeof(struct kinfo_proc);
538 int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
539
540 sysctl(mib, 4, kpt, &size, NULL, (size_t)0); // size is either 1 or 0 entries when we ask for a single pid
541
542 return (size==sizeof(struct kinfo_proc));
543 }
544
545 // The log files can be quite large and aren't too useful after the process that created them no longer exists.
546 // Normally they should get removed when the process exits, but if the process crashed the log files might remain.
547 // So, reap any stack log files for processes that no longer exist.
548 //
549 // If the remove_for_this_pid flag is set, then any log files that already exist for the current process will also be deleted.
550 // Those log files are probably the result of this process having been exec'ed from another one (without a fork()).
551 // The remove_for_this_pid flag is only set for a target process (one just starting logging); a stack logging "client"
552 // process reaps log files too, but if we're using stack logging on the client process itself, then we don't want to remove
553 // its own log files.
554 static void
555 reap_orphaned_log_files(bool remove_for_this_pid)
556 {
557 DIR *dp;
558 struct dirent *entry;
559 char prefix_name[PATH_MAX];
560 char pathname[PATH_MAX];
561 pid_t current_pid = getpid();
562
563 if ((dp = opendir(_PATH_TMP)) == NULL) {
564 return;
565 }
566
567 strlcpy(prefix_name, stack_log_file_base_name, (size_t)PATH_MAX);
568 size_t prefix_length = strlen(prefix_name);
569
570 while ( (entry = readdir(dp)) != NULL ) {
571 if ( entry->d_type != DT_DIR && entry->d_type != DT_LNK && ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) ) {
572 long pid = strtol(&entry->d_name[prefix_length], (char **)NULL, 10);
573 if ( (! is_process_running((pid_t)pid)) || (remove_for_this_pid && (pid_t)pid == current_pid) ) {
574 strlcpy(pathname, _PATH_TMP, (size_t)PATH_MAX);
575 strlcat(pathname, entry->d_name, (size_t)PATH_MAX);
576 char reference_file_buffer[PATH_MAX];
577 bool pathname_is_ref_file = false;
578 if (log_file_is_reference(pathname, reference_file_buffer, (size_t)PATH_MAX) && *reference_file_buffer) {
579 pathname_is_ref_file = true;
580 if (delete_logging_file(reference_file_buffer) == 0) {
581 if (remove_for_this_pid && pid == current_pid) {
582 _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", reference_file_buffer);
583 } else {
584 _malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, reference_file_buffer);
585 }
586 }
587 }
588 if (delete_logging_file(pathname) == 0) {
589 if (remove_for_this_pid && pid == current_pid) {
590 if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", pathname);
591 } else {
592 if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, pathname);
593 }
594 char shmem_name_string[PATH_MAX];
595 strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
596 append_int(shmem_name_string, (pid_t)pid, (size_t)PATH_MAX);
597 if (pid != current_pid) shm_unlink(shmem_name_string);
598 }
599 }
600 }
601 }
602 closedir(dp);
603 }
604
605 /*
606 * Since there are many errors that could cause stack logging to get disabled, this is a convenience method
607 * for disabling any future logging in this process and for informing the user.
608 */
609 static void
610 disable_stack_logging(void)
611 {
612 _malloc_printf(ASL_LEVEL_INFO, "stack logging disabled due to previous errors.\n");
613 stack_logging_enable_logging = 0;
614 malloc_logger = NULL;
615 }
616
617 /* A wrapper around write() that will try to reopen the index/stack file and
618 * write to it if someone closed it underneath us (e.g. the process we just
619 * started decided to close all file descriptors except stdin/out/err). Some
620 * programs like to do that and calling abort() on them is rude.
621 */
622 static ssize_t
623 robust_write(int fd, const void *buf, size_t nbyte) {
624 // errno is provided by <errno.h>, included at the top of this file
625 ssize_t written = write(fd, buf, nbyte);
626 if (written == -1 && errno == EBADF) {
627 char *file_to_reopen = NULL;
628 int *fd_to_reset = NULL;
629
630 // descriptor was closed on us. We need to reopen it
631 if (fd == index_file_descriptor) {
632 file_to_reopen = index_file_path;
633 fd_to_reset = &index_file_descriptor;
634 } else {
635 // We don't know about this file. Return (and abort()).
636 _malloc_printf(ASL_LEVEL_INFO, "Unknown file descriptor; expecting stack logging index file\n");
637 return -1;
638 }
639
640 // The file *should* already exist. If not, fail.
641 fd = open(file_to_reopen, O_WRONLY | O_APPEND);
642 if (fd < 3) {
643 // If we somehow got stdin/out/err, we need to relinquish them and
644 // get another fd.
645 int fds_to_close[3] = { 0 };
646 while (fd < 3) {
647 if (fd == -1) {
648 _malloc_printf(ASL_LEVEL_INFO, "unable to re-open stack logging file %s\n", file_to_reopen);
649 delete_log_files();
650 return -1;
651 }
652 fds_to_close[fd] = 1;
653 fd = dup(fd);
654 }
655
656 // We have an fd we like. Close the ones we opened.
657 if (fds_to_close[0]) close(0);
658 if (fds_to_close[1]) close(1);
659 if (fds_to_close[2]) close(2);
660 }
661
662 *fd_to_reset = fd;
663 written = write(fd, buf, nbyte);
664 }
665 return written;
666 }
667
668 static void
669 flush_data(void)
670 {
671 ssize_t written; // signed size_t
672 size_t remaining;
673 char * p;
674
675 if (index_file_descriptor == -1) {
676 if (create_log_file() == NULL) {
677 return;
678 }
679 }
680
681 // Write the events before the index so that hopefully the events will be on disk if the index refers to them.
682 p = pre_write_buffers->index_buffer;
683 remaining = (size_t)pre_write_buffers->next_free_index_buffer_offset;
684 while (remaining > 0) {
685 written = robust_write(index_file_descriptor, p, remaining);
686 if (written == -1) {
687 _malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n", index_file_path, strerror(errno));
688 disable_stack_logging();
689 return;
690 }
691 p += written;
692 remaining -= written;
693 }
694
695 pre_write_buffers->start_index_offset += pre_write_buffers->next_free_index_buffer_offset;
696 pre_write_buffers->next_free_index_buffer_offset = 0;
697 }
698
699 static void
700 prepare_to_log_stacks(void)
701 {
702 if (!pre_write_buffers) {
703 last_logged_malloc_address = 0ul;
704 logging_use_compaction = (stack_logging_dontcompact ? 0 : logging_use_compaction);
705
706 // Create a shared memory region to hold the pre-write index and stack buffers. This will allow remote analysis processes to access
707 // these buffers to get logs for even the most recent allocations. The remote process will need to pause this process to assure that
708 // the contents of these buffers don't change while being inspected.
709 char shmem_name_string[PATH_MAX];
710 strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
711 append_int(shmem_name_string, getpid(), (size_t)PATH_MAX);
712
713 int shmid = shm_open(shmem_name_string, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
714 if (shmid < 0) {
715 // Failed to create shared memory region; turn off stack logging.
716 _malloc_printf(ASL_LEVEL_INFO, "error while allocating shared memory for disk-based stack logging output buffers\n");
717 disable_stack_logging();
718 return;
719 }
720
721 size_t full_shared_mem_size = sizeof(stack_buffer_shared_memory);
722 ftruncate(shmid, (off_t)full_shared_mem_size);
723 pre_write_buffers = (stack_buffer_shared_memory*)mmap(0, full_shared_mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
724 close(shmid);
725
726 if (pre_write_buffers == MAP_FAILED) { pre_write_buffers = NULL; // mmap() returns MAP_FAILED, not NULL, on failure
727 _malloc_printf(ASL_LEVEL_INFO, "error mapping in shared memory for disk-based stack logging output buffers\n");
728 disable_stack_logging();
729 return;
730 }
731
732 // Store and use the buffer offsets in shared memory so that they can be accessed remotely
733 pre_write_buffers->start_index_offset = 0ull;
734 pre_write_buffers->next_free_index_buffer_offset = 0;
735
736 // create the backtrace uniquing table
737 pre_write_buffers->uniquing_table = __create_uniquing_table();
738 pre_write_buffers->uniquing_table_address = (mach_vm_address_t)(uintptr_t)pre_write_buffers->uniquing_table;
739 if (!pre_write_buffers->uniquing_table) {
740 _malloc_printf(ASL_LEVEL_INFO, "error while allocating stack uniquing table\n");
741 disable_stack_logging();
742 return;
743 }
744
745 stack_buffer = (vm_address_t*)allocate_pages((uint64_t)round_page(sizeof(vm_address_t) * STACK_LOGGING_MAX_STACK_SIZE));
746 if (!stack_buffer) {
747 _malloc_printf(ASL_LEVEL_INFO, "error while allocating stack trace buffer\n");
748 disable_stack_logging();
749 return;
750 }
751
752 // malloc() can be called by the following, so these need to be done outside the stack_logging_lock but after the buffers have been set up.
753 atexit(delete_log_files); // atexit() can call malloc()
754 reap_orphaned_log_files(true); // this calls opendir() which calls malloc()
755
756 // this call ensures that the log files exist; analyzing processes will rely on this assumption.
757 if (create_log_file() == NULL) {
758 disable_stack_logging();
759 return;
760 }
761 }
762 }
763
764 void
765 __disk_stack_logging_log_stack(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
766 {
767 if (!stack_logging_enable_logging) return;
768
769 // check incoming data
770 if (type_flags & stack_logging_type_alloc && type_flags & stack_logging_type_dealloc) {
771 uintptr_t swapper = size;
772 size = ptr_arg;
773 ptr_arg = swapper;
774 if (ptr_arg == return_val) return; // realloc had no effect, skipping
775
776 if (ptr_arg == 0) { // realloc(NULL, size) same as malloc(size)
777 type_flags ^= stack_logging_type_dealloc;
778 } else {
779 // realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
780 __disk_stack_logging_log_stack(stack_logging_type_dealloc, zone_ptr, ptr_arg, (uintptr_t)0, (uintptr_t)0, num_hot_to_skip + 1);
781 __disk_stack_logging_log_stack(stack_logging_type_alloc, zone_ptr, size, (uintptr_t)0, return_val, num_hot_to_skip + 1);
782 return;
783 }
784 }
785 if (type_flags & stack_logging_type_dealloc) {
786 if (size) {
787 ptr_arg = size;
788 size = 0;
789 } else return; // free(nil)
790 }
791 if (type_flags & stack_logging_type_alloc && return_val == 0) return; // alloc that failed
792
793 type_flags &= 0x7;
794
795 // now actually begin
796 prepare_to_log_stacks();
797
798 // since there could have been a fatal (to stack logging) error such as the log files not being created, check this variable before continuing
799 if (!stack_logging_enable_logging) return;
800 vm_address_t self_thread = (vm_address_t)pthread_self(); // use pthread_self() rather than mach_thread_self() to avoid system call
801
802 // lock and enter
803 OSSpinLockLock(&stack_logging_lock);
804
805 if (!stack_logging_enable_logging) {
806 OSSpinLockUnlock(&stack_logging_lock);
807 return;
808 }
809
810 // compaction
811 if (last_logged_malloc_address && (type_flags & stack_logging_type_dealloc) && STACK_LOGGING_DISGUISE(ptr_arg) == last_logged_malloc_address) {
812 // *waves hand* the last allocation never occurred
813 pre_write_buffers->next_free_index_buffer_offset -= (uint32_t)sizeof(stack_logging_index_event);
814 last_logged_malloc_address = 0ul;
815
816 OSSpinLockUnlock(&stack_logging_lock);
817 return;
818 }
819
820 // gather stack
821 uint32_t count;
822 thread_stack_pcs(stack_buffer, STACK_LOGGING_MAX_STACK_SIZE-1, &count); // only gather up to STACK_LOGGING_MAX_STACK_SIZE-1 since we append thread id
823 stack_buffer[count++] = self_thread + 1; // stuffing thread # in the coldest slot. Add 1 to match what the old stack logging did.
824 num_hot_to_skip += 2;
825 if (count <= num_hot_to_skip) {
826 // Oops! Didn't get a valid backtrace from thread_stack_pcs().
827 OSSpinLockUnlock(&stack_logging_lock);
828 return;
829 }
830
831 // unique stack in memory
832 count -= num_hot_to_skip;
833 #if __LP64__
834 mach_vm_address_t *frames = (mach_vm_address_t*)stack_buffer + num_hot_to_skip;
835 #else
836 mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
837 uint32_t i;
838 for (i = 0; i < count; i++) {
839 frames[i] = stack_buffer[i+num_hot_to_skip];
840 }
841 #endif
842
843 uint64_t uniqueStackIdentifier = (uint64_t)(-1ll);
844 while (!__enter_frames_in_table(pre_write_buffers->uniquing_table, &uniqueStackIdentifier, frames, (int32_t)count)) {
845 __expand_uniquing_table(pre_write_buffers->uniquing_table);
846 }
847
848 stack_logging_index_event current_index;
849 if (type_flags & stack_logging_type_alloc) {
850 current_index.address = STACK_LOGGING_DISGUISE(return_val);
851 current_index.argument = size;
852 if (logging_use_compaction) {
853 last_logged_malloc_address = current_index.address; // disguised
854 }
855 } else {
856 current_index.address = STACK_LOGGING_DISGUISE(ptr_arg);
857 current_index.argument = 0ul;
858 last_logged_malloc_address = 0ul;
859 }
860 current_index.offset_and_flags = STACK_LOGGING_OFFSET_AND_FLAGS(uniqueStackIdentifier, type_flags);
861
862 // the following line is a good debugging tool for logging each allocation event as it happens.
863 // malloc_printf("{0x%lx, %lld}\n", STACK_LOGGING_DISGUISE(current_index.address), uniqueStackIdentifier);
864
865 // flush the data buffer to disk if necessary
866 if (pre_write_buffers->next_free_index_buffer_offset + sizeof(stack_logging_index_event) >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
867 flush_data();
868 }
869
870 // store bytes in buffers
871 memcpy(pre_write_buffers->index_buffer+pre_write_buffers->next_free_index_buffer_offset, &current_index, sizeof(stack_logging_index_event));
872 pre_write_buffers->next_free_index_buffer_offset += (uint32_t)sizeof(stack_logging_index_event);
873
874 OSSpinLockUnlock(&stack_logging_lock);
875 }
876
877 void
878 __stack_logging_fork_prepare() {
879 OSSpinLockLock(&stack_logging_lock);
880 }
881
882 void
883 __stack_logging_fork_parent() {
884 OSSpinLockUnlock(&stack_logging_lock);
885 }
886
887 void
888 __stack_logging_fork_child() {
889 malloc_logger = NULL;
890 stack_logging_enable_logging = 0;
891 OSSpinLockUnlock(&stack_logging_lock);
892 }
893
894 boolean_t
895 __stack_logging_locked()
896 {
897 bool acquired_lock = OSSpinLockTry(&stack_logging_lock);
898 if (acquired_lock) OSSpinLockUnlock(&stack_logging_lock);
899 return (acquired_lock ? false : true);
900 }
901
902 #pragma mark -
903 #pragma mark Remote Stack Log Access
904
905 #pragma mark - Design notes:
906
907 /*
908
909 this first one will look through the index, find the "stack_identifier" (i.e. the offset in the log file), and call the third function listed here.
910 extern kern_return_t __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *num_frames);
911 // Gets the last allocation record about address
912
913 if !address, will load index and iterate through (expensive)
914 else will load just index, search for stack, and then use third function here to retrieve. (also expensive)
915 extern kern_return_t __mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context);
916 // Applies enumerator to all records involving address sending context as enumerator's second parameter; if !address, applies enumerator to all records
917
918 this function will load the stack file, look for the stack, and follow up to STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY references to reconstruct.
919 extern kern_return_t __mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count);
920 // Given a uniqued_stack fills stack_frames_buffer
921
922 */
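/*
 * A hedged usage sketch of the client-side API described above (non-compiled, hence the #if 0;
 * 'print_record' and 'dump_all_records' are hypothetical names, and the task port is assumed to
 * come from task_for_pid() in the analysis process):
 */
#if 0
static void
print_record(mach_stack_logging_record_t record, void *context)
{
	task_t target_task = (task_t)(uintptr_t)context;
	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
	uint32_t frame_count = 0;
	if (__mach_stack_logging_frames_for_uniqued_stack(target_task, record.stack_identifier,
			frames, (uint32_t)STACK_LOGGING_MAX_STACK_SIZE, &frame_count) == KERN_SUCCESS) {
		// frames[0..frame_count) holds the backtrace for this event, hottest frame first.
	}
}

static void
dump_all_records(task_t target_task)
{
	// an address of 0 asks for every record in the target task's log (expensive, per the notes above)
	__mach_stack_logging_enumerate_records(target_task, (mach_vm_address_t)0,
			print_record, (void*)(uintptr_t)target_task);
}
#endif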
923
924 #pragma mark - caching
925
926 __attribute__((always_inline)) static inline size_t
927 hash_index(uint64_t address, size_t max_pos) {
928 return (size_t)((address >> 2) % (max_pos-1)); // simplicity rules.
929 }
930
931 __attribute__((always_inline)) static inline size_t
932 hash_multiplier(size_t capacity, uint32_t allowed_collisions) {
933 return (capacity/(allowed_collisions*2+1));
934 }
935
936 __attribute__((always_inline)) static inline size_t
937 next_hash(size_t hash, size_t multiplier, size_t capacity, uint32_t collisions) {
938 hash += multiplier * collisions;
939 if (hash >= capacity) hash -= capacity;
940 return hash;
941 }
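// Example with the initial client cache created below (capacity 1<<14, allowance 17): the
// multiplier is 16384/35 = 468, so a colliding address is reprobed with successive steps of
// 468, 936, 1404, ... slots before the cache is expanded.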
942
943 static void
944 transfer_node(remote_index_cache *cache, remote_index_node *old_node)
945 {
946 uint32_t collisions = 0;
947 size_t pos = hash_index(old_node->address, cache->cache_node_capacity);
948 size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
949 do {
950 if (cache->table_memory[pos].address == old_node->address) { // hit like this shouldn't happen.
951 fprintf(stderr, "impossible collision! two address==address lists! (transfer_node)\n");
952 break;
953 } else if (cache->table_memory[pos].address == 0) { // empty
954 cache->table_memory[pos] = *old_node;
955 break;
956 } else {
957 collisions++;
958 pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);
959 }
960 } while (collisions <= cache->collision_allowance);
961
962 if (collisions > cache->collision_allowance) {
963 fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node)\n", sizeof(void*)*8);
964 }
965 }
966
967 static void
968 expand_cache(remote_index_cache *cache)
969 {
970 // keep old stats
971 size_t old_node_capacity = cache->cache_node_capacity;
972 remote_index_node *old_table = cache->table_memory;
973
974 // quadruple the size (shift left by 2) and allow 3 more collisions, per the growth-rate comment at the top of this file
975 cache->cache_size <<= 2;
976 cache->cache_node_capacity <<= 2;
977 cache->collision_allowance += 3;
978 cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));
979
980 // repopulate (expensive!)
981 size_t i;
982 for (i = 0; i < old_node_capacity; i++) {
983 if (old_table[i].address) {
984 transfer_node(cache, &old_table[i]);
985 }
986 }
987 free(old_table);
988 // printf("cache expanded to %0.2f mb (eff: %3.0f%%, capacity: %lu, nodes: %llu, llnodes: %llu)\n", ((float)(cache->cache_size))/(1 << 20), ((float)(cache->cache_node_count)*100.0)/((float)(cache->cache_node_capacity)), cache->cache_node_capacity, cache->cache_node_count, cache->cache_llnode_count);
989 }
990
991 static void
992 insert_node(remote_index_cache *cache, uint64_t address, uint64_t index_file_offset)
993 {
994 uint32_t collisions = 0;
995 size_t pos = hash_index(address, cache->cache_node_capacity);
996 size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
997
998 bool inserted = false;
999 while (!inserted) {
1000 if (cache->table_memory[pos].address == 0ull || cache->table_memory[pos].address == address) { // hit or empty
1001 cache->table_memory[pos].address = address;
1002 cache->table_memory[pos].index_file_offset = index_file_offset;
1003 inserted = true;
1004 break;
1005 }
1006
1007 collisions++;
1008 pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);
1009
1010 if (collisions > cache->collision_allowance) {
1011 expand_cache(cache);
1012 pos = hash_index(address, cache->cache_node_capacity);
1013 multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
1014 collisions = 0;
1015 }
1016 }
1017
1018 }
1019
1020 static void
1021 update_cache_for_file_streams(remote_task_file_streams *descriptors)
1022 {
1023 remote_index_cache *cache = descriptors->cache;
1024
1025 // create from scratch if necessary.
1026 if (!cache) {
1027 descriptors->cache = cache = (remote_index_cache*)calloc((size_t)1, sizeof(remote_index_cache));
1028 cache->cache_node_capacity = 1 << 14;
1029 cache->collision_allowance = 17;
1030 cache->last_index_file_offset = 0;
1031 cache->cache_size = cache->cache_node_capacity*sizeof(remote_index_node);
1032 cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));
1033
1034 // now map in the shared memory, if possible
1035 char shmem_name_string[PATH_MAX];
1036 strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
1037 append_int(shmem_name_string, descriptors->remote_pid, (size_t)PATH_MAX);
1038
1039 int shmid = shm_open(shmem_name_string, O_RDWR, S_IRUSR | S_IWUSR);
1040 if (shmid >= 0) {
1041 cache->shmem = mmap(0, sizeof(stack_buffer_shared_memory), PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
1042 if (cache->shmem == MAP_FAILED) cache->shmem = NULL; close(shmid); // mmap() returns MAP_FAILED, not NULL, on failure
1043 }
1044
1045 if (shmid < 0 || cache->shmem == NULL) {
1046 // failed to connect to the shared memory region; warn and continue.
1047 _malloc_printf(ASL_LEVEL_INFO, "warning: unable to connect to remote process' shared memory; allocation histories may not be up-to-date.\n");
1048 }
1049 }
1050
1051 // suspend and see how much updating there is to do. there are three scenarios, listed below
1052 bool update_snapshot = false;
1053 if (descriptors->remote_task != mach_task_self()) {
1054 task_suspend(descriptors->remote_task);
1055 }
1056
1057 struct stat file_statistics;
1058 fstat(fileno(descriptors->index_file_stream), &file_statistics);
1059 size_t read_size = (descriptors->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
1060 uint64_t read_this_update = 0;
1061
1062 // computing delta_indecies (the number of not-yet-cached index records) is subtle; there are three cases:
1063 // 1. there is no shared memory (or we can't connect); diff the last_index_file_offset from the filesize.
1064 // 2. the only updates have been in shared memory; disk file didn't change at all. delta_indecies should be zero, scan snapshot only.
1065 // 3. the updates have flushed to disk, meaning that most likely there is new data on disk that wasn't read from shared memory.
1066 // correct delta_indecies for the pre-scanned amount and read the new data from disk and shmem.
1067 uint64_t delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
1068 uint32_t last_snapshot_scan_index = 0;
1069 if (delta_indecies && cache->shmem) {
1070 // case 3: add cache scanned to known from disk and recalc
1071 cache->last_index_file_offset += cache->snapshot.next_free_index_buffer_offset;
1072 delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
1073 update_snapshot = true;
1074 } else if (cache->shmem) {
1075 // case 2: set the last snapshot scan count so we don't rescan something we've seen.
1076 last_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
1077 }
1078
1079 // no update necessary for the file; check if need a snapshot.
1080 if (delta_indecies == 0) {
1081 if (cache->shmem && !update_snapshot) {
1082 update_snapshot = (cache->shmem->next_free_index_buffer_offset != cache->snapshot.next_free_index_buffer_offset);
1083 }
1084 }
1085
1086 // if a snapshot is necessary, memcpy from remote frozen process' memory
1087 // note: there were two ways to do this – spin lock or suspend. suspend allows us to
1088 // analyze processes even if they were artificially suspended. with a lock, there'd be
1089 // worry that the target was suspended with the lock taken.
1090 if (update_snapshot) {
1091 memcpy(&cache->snapshot, cache->shmem, sizeof(stack_buffer_shared_memory));
1092 // also need to update our version of the remote uniquing table
1093 vm_address_t local_uniquing_address = 0ul;
1094 mach_msg_type_number_t local_uniquing_size = 0;
1095 mach_vm_size_t desired_size = round_page(sizeof(backtrace_uniquing_table));
1096 kern_return_t err;
1097 if ((err = mach_vm_read(descriptors->remote_task, cache->shmem->uniquing_table_address, desired_size, &local_uniquing_address, &local_uniquing_size)) != KERN_SUCCESS
1098 || local_uniquing_size != desired_size) {
1099 fprintf(stderr, "error while attempting to mach_vm_read remote stack uniquing table (%d): %s\n", err, mach_error_string(err));
1100 } else {
1101 // the mach_vm_read was successful, so acquire the uniquing table
1102
1103 // need to re-read the table, so deallocate the current memory
1104 if (cache->uniquing_table.table) mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)(cache->uniquing_table.table), cache->uniquing_table.tableSize);
1105
1106 // the following line gathers the uniquing table structure data, but the actual table memory is invalid since it's a pointer from the
1107 // remote process. this pointer will be mapped shared in a few lines.
1108 cache->uniquing_table = *((backtrace_uniquing_table*)local_uniquing_address);
1109
1110 vm_address_t local_table_address = 0ul;
1111 mach_msg_type_number_t local_table_size = 0;
1112
1113 err = mach_vm_read(descriptors->remote_task, cache->uniquing_table.table_address, cache->uniquing_table.tableSize, &local_table_address, &local_table_size);
1114 if (err == KERN_SUCCESS) cache->uniquing_table.table = (mach_vm_address_t*)local_table_address;
1115 else cache->uniquing_table.table = NULL;
1116
1117 mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)local_uniquing_address, (mach_vm_size_t)local_uniquing_size);
1118 }
1119 }
1120
1121 // resume
1122 if (descriptors->remote_task != mach_task_self()) {
1123 task_resume(descriptors->remote_task);
1124 }
1125
1126 if (!update_snapshot && delta_indecies == 0) return; // absolutely no updating needed.
1127
1128 FILE *the_index = (descriptors->index_file_stream);
1129
1130 // prepare for the read; target process could be 32 or 64 bit.
1131
1132 stack_logging_index_event32 *target_32_index = NULL;
1133 stack_logging_index_event64 *target_64_index = NULL;
1134
1135 // perform the update from the file
1136 uint32_t i;
1137 if (delta_indecies) {
1138 char bufferSpace[4096]; // 4 kb
1139 target_32_index = (stack_logging_index_event32*)bufferSpace;
1140 target_64_index = (stack_logging_index_event64*)bufferSpace;
1141 size_t number_slots = (size_t)(4096/read_size);
1142
1143 size_t read_count = 0;
1144 if (fseeko(the_index, (off_t)(cache->last_index_file_offset), SEEK_SET)) {
1145 fprintf(stderr, "error while attempting to cache information from remote stack index file. (update_cache_for_file_streams)\n");
1146 }
1147 off_t current_index_position = cache->last_index_file_offset;
1148 do {
1149 number_slots = (size_t)MIN(delta_indecies - read_this_update, number_slots);
1150 read_count = fread(bufferSpace, read_size, number_slots, the_index);
1151 if (descriptors->task_is_64_bit) {
1152 for (i = 0; i < read_count; i++) {
1153 insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)current_index_position);
1154 read_this_update++;
1155 current_index_position += read_size;
1156 }
1157 } else {
1158 for (i = 0; i < read_count; i++) {
1159 insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)current_index_position);
1160 read_this_update++;
1161 current_index_position += read_size;
1162 }
1163 }
1164 } while (read_count);
1165
1166 if (read_this_update < delta_indecies) {
1167 fprintf(stderr, "insufficient data in remote stack index file; expected more records.\n");
1168 }
1169 cache->last_index_file_offset += read_this_update * read_size;
1170 }
1171
1172 if (update_snapshot) {
1173 target_32_index = (stack_logging_index_event32*)(cache->snapshot.index_buffer);
1174 target_64_index = (stack_logging_index_event64*)(cache->snapshot.index_buffer);
1175
1176 uint32_t free_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
1177 off_t current_index_position = cache->snapshot.start_index_offset;
1178 if (descriptors->task_is_64_bit) {
1179 for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
1180 insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
1181 }
1182 } else {
1183 for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
1184 insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
1185 }
1186 }
1187 }
1188 }
1189
1190 static void
1191 destroy_cache_for_file_streams(remote_task_file_streams *descriptors)
1192 {
1193 if (descriptors->cache->shmem) {
1194 munmap(descriptors->cache->shmem, sizeof(stack_buffer_shared_memory));
1195 }
1196 free(descriptors->cache->table_memory);
1197 free(descriptors->cache);
1198 descriptors->cache = NULL;
1199 }
1200
1201 #pragma mark - internal
1202
1203 // In the stack log analysis process, find the stack logging files for target process <pid>
1204 // by scanning the temporary directory for directory entries with names of the form "stack-logs.<pid>."
1205 // If we find such a directory then open the stack logging files in there.
1206 static void
1207 open_log_files(pid_t pid, remote_task_file_streams *this_task_streams)
1208 {
1209 DIR *dp;
1210 struct dirent *entry;
1211 char prefix_name[PATH_MAX];
1212 char pathname[PATH_MAX];
1213
1214 reap_orphaned_log_files(false); // reap any left-over log files (for non-existent processes, but not for this analysis process)
1215
1216 if ((dp = opendir(_PATH_TMP)) == NULL) {
1217 return;
1218 }
1219
1220 // It's OK to use snprintf in this routine since it should only be called by the clients
1221 // of stack logging, and thus calls to malloc are OK.
1222 snprintf(prefix_name, (size_t)PATH_MAX, "%s%d.", stack_log_file_base_name, pid); // make sure to use "%s%d." rather than just "%s%d" to match the whole pid
1223 size_t prefix_length = strlen(prefix_name);
1224
1225 while ( (entry = readdir(dp)) != NULL ) {
1226 if ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) {
1227 snprintf(pathname, (size_t)PATH_MAX, "%s%s", _PATH_TMP, entry->d_name);
1228 char reference_file[PATH_MAX];
1229 if (log_file_is_reference(pathname, reference_file, (size_t)PATH_MAX)) {
1230 this_task_streams->index_file_stream = fopen(reference_file, "r");
1231 } else {
1232 this_task_streams->index_file_stream = fopen(pathname, "r");
1233 }
1234
1235 break;
1236 }
1237 }
1238 closedir(dp);
1239 }
1240
1241 static remote_task_file_streams*
1242 retain_file_streams_for_task(task_t task)
1243 {
1244 if (task == MACH_PORT_NULL) return NULL;
1245
1246 OSSpinLockLock(&remote_fd_list_lock);
1247
1248 // see if they're already in use
1249 uint32_t i = 0;
1250 for (i = 0; i < remote_task_fd_count; i++) {
1251 if (remote_fds[i].remote_task == task) {
1252 remote_fds[i].in_use_count++;
1253 OSSpinLockUnlock(&remote_fd_list_lock);
1254 return &remote_fds[i];
1255 }
1256 }
1257
1258 // open them
1259 uint32_t failures = 0;
1260 if (remote_task_fd_count == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
1261 while (remote_fds[next_remote_task_fd].in_use_count > 0) {
1262 next_remote_task_fd++;
1263 if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
1264 failures++;
1265 if (failures >= STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
1266 OSSpinLockUnlock(&remote_fd_list_lock);
1267 return NULL;
1268 }
1269 }
1270 fclose(remote_fds[next_remote_task_fd].index_file_stream);
1271 destroy_cache_for_file_streams(&remote_fds[next_remote_task_fd]);
1272 }
1273
1274 pid_t pid;
1275 kern_return_t err = pid_for_task(task, &pid);
1276 if (err != KERN_SUCCESS) {
1277 OSSpinLockUnlock(&remote_fd_list_lock);
1278 return NULL;
1279 }
1280
1281 remote_task_file_streams *this_task_streams = &remote_fds[next_remote_task_fd];
1282
1283 open_log_files(pid, this_task_streams);
1284
1285 // check if opens failed
1286 if (this_task_streams->index_file_stream == NULL) {
1287 // (the index stream is the only file opened above; nothing else to clean up here)
1288 OSSpinLockUnlock(&remote_fd_list_lock);
1289 return NULL;
1290 }
1291
1292 // check if target pid is running 64-bit
1293 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
1294 struct kinfo_proc processInfo;
1295 size_t bufsize = sizeof(processInfo);
1296 if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, (size_t)0) == 0 && bufsize > 0) {
1297 this_task_streams->task_is_64_bit = processInfo.kp_proc.p_flag & P_LP64;
1298 } else {
1299 this_task_streams->task_is_64_bit = 0;
1300 }
1301
1302 // otherwise set vars and go
1303 this_task_streams->in_use_count = 1;
1304 this_task_streams->remote_task = task;
1305 this_task_streams->remote_pid = pid;
1306 next_remote_task_fd++;
1307 if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
1308 remote_task_fd_count = MIN(remote_task_fd_count + 1, STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED);
1309
1310 OSSpinLockUnlock(&remote_fd_list_lock);
1311 return this_task_streams;
1312 }
1313
1314 static void
1315 release_file_streams_for_task(task_t task)
1316 {
1317 OSSpinLockLock(&remote_fd_list_lock);
1318
1319 // decrement in-use count
1320 uint32_t i = 0;
1321 for (i = 0; i < remote_task_fd_count; i++) {
1322 if (remote_fds[i].remote_task == task) {
1323 remote_fds[i].in_use_count--;
1324 break;
1325 }
1326 }
1327
1328 OSSpinLockUnlock(&remote_fd_list_lock);
1329 }
1330
1331 #pragma mark - extern
1332
1333 kern_return_t
1334 __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
1335 {
1336 remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
1337 if (remote_fd == NULL) {
1338 return KERN_FAILURE;
1339 }
1340
1341 update_cache_for_file_streams(remote_fd);
1342
1343 uint32_t collisions = 0;
1344 size_t hash = hash_index(address, remote_fd->cache->cache_node_capacity);
1345 size_t multiplier = hash_multiplier(remote_fd->cache->cache_node_capacity, remote_fd->cache->collision_allowance);
1346 uint64_t located_file_position = 0;
1347
1348 bool found = false;
1349 do {
1350 if (remote_fd->cache->table_memory[hash].address == address) { // hit!
1351 located_file_position = remote_fd->cache->table_memory[hash].index_file_offset;
1352 found = true;
1353 break;
1354 } else if (remote_fd->cache->table_memory[hash].address == 0ull) { // failure!
1355 break;
1356 }
1357
1358 collisions++;
1359 hash = next_hash(hash, multiplier, remote_fd->cache->cache_node_capacity, collisions);
1360
1361 } while (collisions <= remote_fd->cache->collision_allowance);

	if (found) {
		// prepare for the read; target process could be 32 or 64 bit.
		stack_logging_index_event32 *target_32_index = NULL;
		stack_logging_index_event64 *target_64_index = NULL;

		if (located_file_position >= remote_fd->cache->last_index_file_offset) {
			// must be in shared memory
			if (remote_fd->cache->shmem) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}

		} else {
			// it's written to disk
			char bufferSpace[128];

			size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
			fseeko(remote_fd->index_file_stream, (off_t)located_file_position, SEEK_SET);
			size_t read_count = fread(bufferSpace, read_size, (size_t)1, remote_fd->index_file_stream);
			if (read_count) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}
		}
	}

	release_file_streams_for_task(task);

	if (!found) {
		return KERN_FAILURE;
	}

	return __mach_stack_logging_frames_for_uniqued_stack(task, located_file_position, stack_frames_buffer, max_stack_frames, count);
}
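
/*
 * Illustrative caller sketch (hypothetical; assumes `task` was obtained via
 * task_for_pid() and `block_address` is a malloc'd pointer in the target):
 *
 *	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
 *	uint32_t frame_count = 0;
 *	if (__mach_stack_logging_get_frames(task, block_address, frames,
 *			STACK_LOGGING_MAX_STACK_SIZE, &frame_count) == KERN_SUCCESS) {
 *		uint32_t j;
 *		for (j = 0; j < frame_count; j++)
 *			printf("frame %u: 0x%llx\n", j, (unsigned long long)frames[j]);
 *	}
 */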


kern_return_t
__mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}

	bool reading_all_addresses = (address == 0);
	mach_stack_logging_record_t pass_record;
	kern_return_t err = KERN_SUCCESS;

	// update the cache (the index file is read once and only once per call)
	update_cache_for_file_streams(remote_fd);

	FILE *the_index = (remote_fd->index_file_stream);

	// prepare for the read; target process could be 32 or 64 bit.
	char bufferSpace[2048]; // 2 KB
	stack_logging_index_event32 *target_32_index = (stack_logging_index_event32*)bufferSpace;
	stack_logging_index_event64 *target_64_index = (stack_logging_index_event64*)bufferSpace;
	uint32_t target_addr_32 = (uint32_t)STACK_LOGGING_DISGUISE((uint32_t)address);
	uint64_t target_addr_64 = STACK_LOGGING_DISGUISE((uint64_t)address);
	size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	size_t number_slots = (size_t)(sizeof(bufferSpace)/read_size);
	uint64_t total_slots = remote_fd->cache->last_index_file_offset / read_size;

	// perform the search
	size_t read_count = 0;
	int64_t current_file_offset = 0;
	uint32_t i;
	do {
		// read index events from the file until it's exhausted, then grab the rest
		// from the shared memory snapshot; file reading is cropped to the extent
		// that existed when this function was entered
		number_slots = (size_t)MIN(number_slots, total_slots);

		// if out of file to read (as of the time we entered this function), try to use the shared memory snapshot
		if (number_slots == 0) {
			if (remote_fd->cache->shmem && remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset > (uint64_t)current_file_offset) {
				// use shared memory
				target_32_index = (stack_logging_index_event32*)remote_fd->cache->snapshot.index_buffer;
				target_64_index = (stack_logging_index_event64*)remote_fd->cache->snapshot.index_buffer;
				read_count = (uint32_t)(remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset - current_file_offset) / read_size;
				current_file_offset += read_count * read_size;
			} else {
				break;
			}
		} else {
			// read the index events into our local buffer and save the file offset
			// now, since the enumerator callback could modify the stream state
			fseeko(the_index, current_file_offset, SEEK_SET);
			read_count = fread(bufferSpace, read_size, number_slots, the_index);
			current_file_offset = ftello(the_index);
			total_slots -= read_count;
		}

		if (remote_fd->task_is_64_bit) {
			for (i = 0; i < read_count; i++) {
				if (reading_all_addresses || target_64_index[i].address == target_addr_64) {
					pass_record.address = STACK_LOGGING_DISGUISE(target_64_index[i].address);
					pass_record.argument = target_64_index[i].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_64_index[i].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_64_index[i].offset_and_flags);
					enumerator(pass_record, context);
				}
			}
		} else {
			for (i = 0; i < read_count; i++) {
				if (reading_all_addresses || target_32_index[i].address == target_addr_32) {
					pass_record.address = STACK_LOGGING_DISGUISE(target_32_index[i].address);
					pass_record.argument = target_32_index[i].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_32_index[i].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_32_index[i].offset_and_flags);
					enumerator(pass_record, context);
				}
			}
		}
	} while (read_count);

	release_file_streams_for_task(task);
	return err;
}
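
/*
 * Illustrative enumerator sketch (hypothetical): passing address == 0 visits
 * every logged event; a nonzero address visits only that block's events.
 * The callback is defined at file scope, the call site elsewhere:
 *
 *	static void count_records(mach_stack_logging_record_t record, void *context) {
 *		uint64_t *total = (uint64_t *)context;
 *		(*total)++;	// record.type_flags distinguishes allocation and free events
 *	}
 *
 *	uint64_t total = 0;
 *	kern_return_t err = __mach_stack_logging_enumerate_records(task, 0, count_records, &total);
 */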


kern_return_t
__mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task);
	if (remote_fd == NULL) return KERN_FAILURE;

	__unwind_stack_from_table_index(&remote_fd->cache->uniquing_table, stack_identifier, stack_frames_buffer, count, max_stack_frames);

	release_file_streams_for_task(task);

	if (*count) return KERN_SUCCESS;
	else return KERN_FAILURE;
}
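
/*
 * Illustrative sketch (hypothetical): a stack_identifier captured from a
 * `record` passed to an enumerator callback can later be expanded into
 * individual frames:
 *
 *	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
 *	uint32_t n = 0;
 *	if (__mach_stack_logging_frames_for_uniqued_stack(task, record.stack_identifier,
 *			frames, STACK_LOGGING_MAX_STACK_SIZE, &n) == KERN_SUCCESS) {
 *		// frames[0..n-1] now hold the return addresses of the allocation site
 *	}
 */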


#ifdef TEST_DISK_STACK_LOGGING

// cc -o stack_logging_disk stack_logging_disk.c -DTEST_DISK_STACK_LOGGING

#include <sys/wait.h>

int
main()
{
	int status;
	int i;
	size_t total_globals = 0ul;

	fprintf(stderr, "master test process is %d\n", getpid());
	fprintf(stderr, "sizeof pre_write_buffers: %lu\n", sizeof(pre_write_buffers)); total_globals += sizeof(pre_write_buffers);
	fprintf(stderr, "sizeof stack_buffer: %lu\n", sizeof(stack_buffer)); total_globals += sizeof(stack_buffer);
	fprintf(stderr, "sizeof last_logged_malloc_address: %lu\n", sizeof(last_logged_malloc_address)); total_globals += sizeof(last_logged_malloc_address);
	fprintf(stderr, "sizeof stack_log_file_base_name: %lu\n", sizeof(stack_log_file_base_name)); total_globals += sizeof(stack_log_file_base_name);
	fprintf(stderr, "sizeof stack_log_file_suffix: %lu\n", sizeof(stack_log_file_suffix)); total_globals += sizeof(stack_log_file_suffix);
	fprintf(stderr, "sizeof stack_log_link_suffix: %lu\n", sizeof(stack_log_link_suffix)); total_globals += sizeof(stack_log_link_suffix);
	fprintf(stderr, "sizeof stack_log_location: %lu\n", sizeof(stack_log_location)); total_globals += sizeof(stack_log_location);
	fprintf(stderr, "sizeof stack_log_reference_file: %lu\n", sizeof(stack_log_reference_file)); total_globals += sizeof(stack_log_reference_file);
	fprintf(stderr, "sizeof index_file_path: %lu\n", sizeof(index_file_path)); total_globals += sizeof(index_file_path);
	fprintf(stderr, "sizeof index_file_descriptor: %lu\n", sizeof(index_file_descriptor)); total_globals += sizeof(index_file_descriptor);
	fprintf(stderr, "sizeof remote_fds: %lu\n", sizeof(remote_fds)); total_globals += sizeof(remote_fds);
	fprintf(stderr, "sizeof next_remote_task_fd: %lu\n", sizeof(next_remote_task_fd)); total_globals += sizeof(next_remote_task_fd);
	fprintf(stderr, "sizeof remote_task_fd_count: %lu\n", sizeof(remote_task_fd_count)); total_globals += sizeof(remote_task_fd_count);
	fprintf(stderr, "sizeof remote_fd_list_lock: %lu\n", sizeof(remote_fd_list_lock)); total_globals += sizeof(remote_fd_list_lock);
	fprintf(stderr, "sizeof logging_use_compaction: %lu\n", sizeof(logging_use_compaction)); total_globals += sizeof(logging_use_compaction);

	fprintf(stderr, "size of all global data: %lu\n", total_globals);

	create_log_file();

	// create a few child processes and exit them cleanly so their logs get cleaned up
	fprintf(stderr, "\ncreating child processes and exiting cleanly\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_file();
			fprintf(stderr, "exiting child process %d\n", getpid());
			exit(1);
		}
		wait(&status);
	}

	// create a few child processes and abruptly _exit() them, leaving their logs around
	fprintf(stderr, "\ncreating child processes and exiting abruptly, leaving logs around\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_file();
			fprintf(stderr, "exiting child process %d\n", getpid());
			_exit(1);
		}
		wait(&status);
	}

	// this should reap any remaining logs
	fprintf(stderr, "\nexiting master test process %d\n", getpid());
	delete_log_files();
	return 0;
}

#endif