gen/stack_logging_disk.c (apple/libc.git, blob 5a48ca35a9dc13d0ce8a0189c7f653db37d99f04)
/*
 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <pthread.h>
#include <paths.h>
#include <errno.h>
#include "stack_logging.h"
#include "malloc_printf.h"
#include "_simple.h"	// as included by malloc.c, this defines ASL_LEVEL_INFO

#pragma mark -
#pragma mark Defines

#ifdef TEST_DISK_STACK_LOGGING
#define _malloc_printf fprintf
#undef ASL_LEVEL_INFO
#define ASL_LEVEL_INFO stderr
#endif

#define STACK_LOGGING_MAX_STACK_SIZE 512
#define STACK_LOGGING_BLOCK_WRITING_SIZE 8192
#define STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED 3

#define BACKTRACE_UNIQUING_DEBUG 0

// The expansion factor controls the shifting up of table size. A factor of 1 will double the size upon expanding,
// 2 will quadruple the size, etc. Maintaining a 66% fill in an ideal table requires the collision allowance to
// increase by 3 for every quadrupling of the table size (although this is the constant applied to insertion
// performance, O(c*n)).
#define EXPAND_FACTOR 2
#define COLLISION_GROWTH_RATE 3

// For a uniquing table, the useful node size is slots := floor(table_byte_size / (2 * sizeof(mach_vm_address_t)))
// Some useful numbers for the initial max collision value (desiring 66% fill):
// 16K-23K slots -> 16 collisions
// 24K-31K slots -> 17 collisions
// 32K-47K slots -> 18 collisions
// 48K-79K slots -> 19 collisions
// 80K-96K slots -> 20 collisions
#define INITIAL_MAX_COLLIDE	19
#define DEFAULT_UNIQUING_PAGE_SIZE	256
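
// Worked example with the defaults above, assuming a 4 KB vm_page_size:
// DEFAULT_UNIQUING_PAGE_SIZE (256) * 4096 = 1 MB of table, giving
// 1048576 / (2 * sizeof(mach_vm_address_t)) = 65536 slots (64K), which falls in
// the 48K-79K bracket above -> INITIAL_MAX_COLLIDE of 19.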

#pragma mark -
#pragma mark Macros

#define STACK_LOGGING_FLAGS(longlongvar) (uint8_t)((uint64_t)(longlongvar) >> 56)
#define STACK_LOGGING_OFFSET(longlongvar) ((longlongvar) & 0x00FFFFFFFFFFFFFFull)
#define STACK_LOGGING_OFFSET_AND_FLAGS(longlongvar, realshortvar) (((uint64_t)(longlongvar) & 0x00FFFFFFFFFFFFFFull) | ((uint64_t)(realshortvar) << 56))
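
// For illustration (values hypothetical): packing file offset 0x1234 with a
// flag byte of 0x04 yields
//   STACK_LOGGING_OFFSET_AND_FLAGS(0x1234ull, 0x04) == 0x0400000000001234ull
// and the accessors recover the parts:
//   STACK_LOGGING_FLAGS(0x0400000000001234ull)  == 0x04
//   STACK_LOGGING_OFFSET(0x0400000000001234ull) == 0x1234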

#pragma mark -
#pragma mark Types

typedef struct {
	uintptr_t	argument;
	uintptr_t	address;
	uint64_t	offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event;

typedef struct {
	uint32_t	argument;
	uint32_t	address;
	uint64_t	offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event32;

typedef struct {
	uint64_t	argument;
	uint64_t	address;
	uint64_t	offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event64;

#pragma pack(push,4)
typedef struct {
	uint64_t	numPages;	// number of pages of the table
	uint64_t	numNodes;
	uint64_t	tableSize;
	uint64_t	untouchableNodes;
	mach_vm_address_t	table_address;
	int32_t		max_collide;
	// 'table_address' is just an always 64-bit version of the pointer-sized 'table' field to remotely read;
	// it's important that the offset of 'table_address' in the struct does not change between 32 and 64-bit.
#if BACKTRACE_UNIQUING_DEBUG
	uint64_t	nodesFull;
	uint64_t	backtracesContained;
#endif
	mach_vm_address_t	*table;	// allocated using vm_allocate()
} backtrace_uniquing_table;
#pragma pack(pop)

// for storing/looking up allocations that haven't yet been written to disk; consistent size across 32/64-bit processes.
// It's important that these fields don't change alignment due to the architecture because they may be accessed from an
// analyzing process with a different arch - hence the pragmas.
#pragma pack(push,4)
typedef struct {
	uint64_t	start_index_offset;
	uint32_t	next_free_index_buffer_offset;
	mach_vm_address_t	uniquing_table_address;
	char		index_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
	backtrace_uniquing_table	*uniquing_table;
} stack_buffer_shared_memory;
#pragma pack(pop)

// target process address -> record table (for __mach_stack_logging_get_frames)
typedef struct {
	uint64_t	address;
	uint64_t	index_file_offset;
} remote_index_node;

// for caching index information client-side:
typedef struct {
	size_t		cache_size;
	size_t		cache_node_capacity;
	uint32_t	collision_allowance;
	remote_index_node	*table_memory;	// this can be malloced; it's on the client side.
	stack_buffer_shared_memory	*shmem;	// shared memory
	stack_buffer_shared_memory	snapshot;	// memory snapshot of the remote process' shared memory
	uint32_t	last_pre_written_index_size;
	uint64_t	last_index_file_offset;
	backtrace_uniquing_table	uniquing_table;	// snapshot of the remote process' uniquing table
} remote_index_cache;

// for reading stack history information from remote processes:
typedef struct {
	task_t		remote_task;
	pid_t		remote_pid;
	int32_t		task_is_64_bit;
	int32_t		in_use_count;
	FILE		*index_file_stream;
	remote_index_cache	*cache;
} remote_task_file_streams;

#pragma mark -
#pragma mark Constants/Globals

static OSSpinLock stack_logging_lock = OS_SPINLOCK_INIT;

// support for multi-threaded forks
extern void __stack_logging_fork_prepare();
extern void __stack_logging_fork_parent();
extern void __stack_logging_fork_child();

// support for gdb and others checking for stack_logging locks
__private_extern__ boolean_t __stack_logging_locked();

// single-thread access variables
static stack_buffer_shared_memory *pre_write_buffers;
static vm_address_t *stack_buffer;
static uintptr_t last_logged_malloc_address = 0;

// Constants to define stack logging file path names.
// Files will get written as /tmp/stack-logs.<pid>.<progname>.XXXXXX.index
// unless the base directory is specified otherwise with MallocStackLoggingDirectory.
// In that case, a file /tmp/stack-logs.<pid>.<progname>.XXXXXX.link will also be created.
static const char *stack_log_file_base_name = "stack-logs.";
static const char *stack_log_file_suffix = ".index";
static const char *stack_log_link_suffix = ".link";
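
// For example (pid and program name hypothetical): a process 1234 running "Mail"
// that logs to the default location would get an index file such as
//   /tmp/stack-logs.1234.Mail.a1b2c3.index
// where "a1b2c3" stands in for the unique characters that mkstemps() substitutes
// into the ".XXXXXX" template.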

static void *stack_log_path_buffers = NULL;
static char *stack_log_location = NULL;
static char *stack_log_reference_file = NULL;
char *__stack_log_file_path__ = NULL;
static int index_file_descriptor = -1;

// for accessing remote log files
static remote_task_file_streams remote_fds[STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED];
static uint32_t next_remote_task_fd = 0;
static uint32_t remote_task_fd_count = 0;
static OSSpinLock remote_fd_list_lock = OS_SPINLOCK_INIT;

// activation variables
static int logging_use_compaction = 1;	// set this to zero to always disable compaction.

// We set malloc_logger to NULL to disable logging if we encounter errors
// during file writing
typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
extern malloc_logger_t *malloc_logger;
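
// For illustration (a sketch of the argument convention handled by
// __disk_stack_logging_log_stack below; values hypothetical): a malloc(size)
// call that returned ptr would be reported to the hook roughly as
//   malloc_logger(stack_logging_type_alloc, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
// and free(ptr) roughly as
//   malloc_logger(stack_logging_type_dealloc, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);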

#pragma mark -
#pragma mark In-Memory Backtrace Uniquing

static __attribute__((always_inline))
inline void*
allocate_pages(uint64_t memSize)
{
	mach_vm_address_t allocatedMem = 0ull;
	if (mach_vm_allocate(mach_task_self(), &allocatedMem, memSize, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL)) != KERN_SUCCESS) {
		malloc_printf("allocate_pages(): virtual memory exhausted!\n");
	}
	return (void*)(uintptr_t)allocatedMem;
}

static __attribute__((always_inline))
inline int
deallocate_pages(void* memPointer, uint64_t memSize)
{
	return mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)memPointer, memSize);
}

static backtrace_uniquing_table*
__create_uniquing_table(void)
{
	backtrace_uniquing_table *uniquing_table = (backtrace_uniquing_table*)allocate_pages((uint64_t)round_page(sizeof(backtrace_uniquing_table)));
	if (!uniquing_table) return NULL;
	bzero(uniquing_table, sizeof(backtrace_uniquing_table));
	uniquing_table->numPages = DEFAULT_UNIQUING_PAGE_SIZE;
	uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
	uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1;	// make sure it's even.
	uniquing_table->table = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);
	uniquing_table->table_address = (uintptr_t)uniquing_table->table;
	uniquing_table->max_collide = INITIAL_MAX_COLLIDE;
	uniquing_table->untouchableNodes = 0;

#if BACKTRACE_UNIQUING_DEBUG
	malloc_printf("create_uniquing_table(): creating. size: %lldKB == %lldMB, numnodes: %lld (%lld untouchable)\n", uniquing_table->tableSize >> 10, uniquing_table->tableSize >> 20, uniquing_table->numNodes, uniquing_table->untouchableNodes);
	malloc_printf("create_uniquing_table(): table: %p; end: %p\n", uniquing_table->table, (void*)((uintptr_t)uniquing_table->table + (uintptr_t)uniquing_table->tableSize));
#endif
	return uniquing_table;
}

static void
__expand_uniquing_table(backtrace_uniquing_table *uniquing_table)
{
	mach_vm_address_t *oldTable = uniquing_table->table;
	uint64_t oldsize = uniquing_table->tableSize;
	uint64_t oldnumnodes = uniquing_table->numNodes;

	uniquing_table->numPages = uniquing_table->numPages << EXPAND_FACTOR;
	uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
	uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1;	// make sure it's even.
	mach_vm_address_t *newTable = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);

	uniquing_table->table = newTable;
	uniquing_table->table_address = (uintptr_t)uniquing_table->table;
	uniquing_table->max_collide = uniquing_table->max_collide + COLLISION_GROWTH_RATE;

	if (mach_vm_copy(mach_task_self(), (mach_vm_address_t)(uintptr_t)oldTable, oldsize, (mach_vm_address_t)(uintptr_t)newTable) != KERN_SUCCESS) {
		malloc_printf("expandUniquingTable(): VMCopyFailed\n");
	}
	uniquing_table->untouchableNodes = oldnumnodes;

#if BACKTRACE_UNIQUING_DEBUG
	malloc_printf("expandUniquingTable(): expanded from nodes full: %lld of: %lld (~%2d%%); to nodes: %lld (inactive = %lld); unique bts: %lld\n",
			uniquing_table->nodesFull, oldnumnodes, (int)(((uniquing_table->nodesFull * 100.0) / (double)oldnumnodes) + 0.5),
			uniquing_table->numNodes, uniquing_table->untouchableNodes, uniquing_table->backtracesContained);
	malloc_printf("expandUniquingTable(): allocate: %p; end: %p\n", newTable, (void*)((uintptr_t)newTable + (uintptr_t)(uniquing_table->tableSize)));
	malloc_printf("expandUniquingTable(): deallocate: %p; end: %p\n", oldTable, (void*)((uintptr_t)oldTable + (uintptr_t)oldsize));
#endif

	if (deallocate_pages(oldTable, oldsize) != KERN_SUCCESS) {
		malloc_printf("expandUniquingTable(): mach_vm_deallocate failed. [%p]\n", uniquing_table->table);
	}
}

static int
__enter_frames_in_table(backtrace_uniquing_table *uniquing_table, uint64_t *foundIndex, mach_vm_address_t *frames, int32_t count)
{
	// The hash values need to be the same size as the addresses (because we use the value -1); for clarity, define a new type.
	typedef mach_vm_address_t hash_index_t;

	mach_vm_address_t thisPC;
	hash_index_t hash, uParent = (hash_index_t)(-1ll), modulus = (uniquing_table->numNodes-uniquing_table->untouchableNodes-1);
	int32_t collisions, lcopy = count, returnVal = 1;
	hash_index_t hash_multiplier = ((uniquing_table->numNodes - uniquing_table->untouchableNodes)/(uniquing_table->max_collide*2+1));
	mach_vm_address_t *node;
	while (--lcopy >= 0) {
		thisPC = frames[lcopy];

		// hash = initialHash(uniquing_table, uParent, thisPC);
		hash = uniquing_table->untouchableNodes + (((uParent << 4) ^ (thisPC >> 2)) % modulus);
		collisions = uniquing_table->max_collide;

		while (collisions--) {
			node = uniquing_table->table + (hash * 2);

			if (*node == 0 && node[1] == 0) {
				// blank; store this entry!
				// Note that we need to test for both node[0] and node[1] as (0, -1) is a valid entry
				node[0] = thisPC;
				node[1] = uParent;
				uParent = hash;
#if BACKTRACE_UNIQUING_DEBUG
				uniquing_table->nodesFull++;
				if (lcopy == 0) {
					uniquing_table->backtracesContained++;
				}
#endif
				break;
			}
			if (*node == thisPC && node[1] == uParent) {
				// hit! retrieve index and go.
				uParent = hash;
				break;
			}

			hash += collisions * hash_multiplier + 1;

			if (hash >= uniquing_table->numNodes) {
				hash -= (uniquing_table->numNodes - uniquing_table->untouchableNodes);	// wrap around.
			}
		}

		if (collisions < 0) {
			returnVal = 0;
			break;
		}
	}

	if (returnVal) *foundIndex = uParent;

	return returnVal;
}
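
// For illustration: entering the hypothetical backtrace {0x1000, 0x2000, 0x3000}
// (hottest frame first) walks frames[] from the cold end and stores one
// (pc, parent-index) node pair per frame:
//   node for 0x3000: { 0x3000, (hash_index_t)-1 }     <- cold end of the chain
//   node for 0x2000: { 0x2000, index of 0x3000's node }
//   node for 0x1000: { 0x1000, index of 0x2000's node }
// *foundIndex receives the node index of the hottest frame; a later backtrace
// sharing the same cold frames reuses those existing nodes.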

static void
__unwind_stack_from_table_index(backtrace_uniquing_table *uniquing_table, uint64_t index_pos, mach_vm_address_t *out_frames_buffer, uint32_t *out_frames_count, uint32_t max_frames)
{
	mach_vm_address_t *node = uniquing_table->table + (index_pos * 2);
	uint32_t foundFrames = 0;
	if (index_pos < uniquing_table->numNodes) {
		while (foundFrames < max_frames) {
			out_frames_buffer[foundFrames++] = node[0];
			if (node[1] == (mach_vm_address_t)(-1ll)) break;
			node = uniquing_table->table + (node[1] * 2);
		}
	}

	*out_frames_count = foundFrames;
}

#pragma mark -
#pragma mark Disk Stack Logging

static void delete_log_files(void);	// pre-declare
static int delete_logging_file(char *log_location);

static void
append_int(char * filename, pid_t pid, size_t maxLength)
{
	size_t len = strlen(filename);

	uint32_t count = 0;
	pid_t value = pid;
	while (value > 0) {
		value /= 10;
		count++;
	}

	if (len + count >= maxLength) return;	// don't modify the string if it would violate maxLength

	filename[len + count] = '\0';

	value = pid;
	uint32_t i;
	for (i = 0 ; i < count ; i ++) {
		filename[len + count - 1 - i] = '0' + value % 10;
		value /= 10;
	}
}
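
// For example (values hypothetical): given filename "stack-logs." and pid 1234,
// append_int() leaves "stack-logs.1234". The digits are written in place rather
// than via snprintf(), which (as noted in create_log_file below) can itself
// induce malloc() calls.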

/*
 * Check various temporary directory options starting with _PATH_TMP and use confstr.
 * Allocating and releasing the target buffer is the caller's responsibility.
 */
static bool
get_writeable_temp_dir(char* target)
{
	if (!target) return false;
	if (-1 != access(_PATH_TMP, W_OK)) {
		strlcpy(target, _PATH_TMP, (size_t)PATH_MAX);
		return true;
	}
	size_t n = confstr(_CS_DARWIN_USER_TEMP_DIR, target, (size_t) PATH_MAX);
	if ((n > 0) && (n < PATH_MAX)) return true;
	n = confstr(_CS_DARWIN_USER_CACHE_DIR, target, (size_t) PATH_MAX);
	if ((n > 0) && (n < PATH_MAX)) return true;
	/* No writeable tmp directory found. Maybe should try /private/var/tmp for device here ... */
	*target = '\0';
	return false;
}

/*
 * If successful, returns the path to the log file that was created; otherwise NULL.
 *
 * The log could be in one of 3 places (in decreasing order of preference):
 *
 * 1) the value of the environment variable MallocStackLoggingDirectory
 * 2) the temp directory /tmp for desktop apps and internal apps on devices, or
 * 3) the sandbox location + tmp/ in the case of 3rd party apps on the device.
 *
 * For 1 and 3, we create a .link file with the path of the file. We prefer to
 * create this file in /tmp, but if we are unable to (device 3rd party case),
 * we create it in the same location as the .index file and issue a message
 * in syslog asking for it to be copied to /tmp to enable tools.
 *
 */
static char *
create_log_file(void)
{
	pid_t pid = getpid();
	const char *progname = getprogname();
	char *created_log_location = NULL;

	if (stack_log_path_buffers == NULL) {
		/*
		 * on first use, allocate buffers directly from the OS without
		 * using malloc
		 */

		stack_log_path_buffers = allocate_pages((uint64_t)round_page(3*PATH_MAX));
		if (stack_log_path_buffers == NULL) {
			_malloc_printf(ASL_LEVEL_INFO, "unable to allocate memory for path buffers\n");
			return NULL;
		}

		stack_log_location = &((char *)stack_log_path_buffers)[0*PATH_MAX];
		stack_log_reference_file = &((char *)stack_log_path_buffers)[1*PATH_MAX];
		__stack_log_file_path__ = &((char *)stack_log_path_buffers)[2*PATH_MAX];
	}

	// WARNING! use of snprintf can induce malloc() calls
	bool use_alternate_location = false;
	char *env_log_directory = getenv("MallocStackLoggingDirectory");
	size_t stack_log_len;
	if (env_log_directory && *env_log_directory) {
		use_alternate_location = true;
		strlcpy(stack_log_location, env_log_directory, (size_t)PATH_MAX);
	}
	if (!use_alternate_location || (access(stack_log_location, W_OK) == -1)) {
		if (!get_writeable_temp_dir(stack_log_location)) {
			_malloc_printf(ASL_LEVEL_INFO, "No writeable tmp dir\n");
			return NULL;
		}
		if (0 != strcmp(stack_log_location, _PATH_TMP))
			use_alternate_location = true;
	}
	stack_log_len = strlen(stack_log_location);
	// add the '/' only if it's not already there.
	if (stack_log_location[stack_log_len-1] != '/') {
		strlcat(stack_log_location, "/", (size_t)PATH_MAX);
		++stack_log_len;
	}

	strlcpy(__stack_log_file_path__, stack_log_location, (size_t)PATH_MAX);

	strlcat(__stack_log_file_path__, stack_log_file_base_name, (size_t)PATH_MAX);
	append_int(__stack_log_file_path__, pid, (size_t)PATH_MAX);
	if (progname && progname[0] != '\0') {
		strlcat(__stack_log_file_path__, ".", (size_t)PATH_MAX);
		strlcat(__stack_log_file_path__, progname, (size_t)PATH_MAX);
	}
	if (!use_alternate_location) strlcat(__stack_log_file_path__, ".XXXXXX", (size_t)PATH_MAX);
	strlcat(__stack_log_file_path__, stack_log_file_suffix, (size_t)PATH_MAX);

	// Securely create the log file.
	if ((index_file_descriptor = mkstemps(__stack_log_file_path__, (int)strlen(stack_log_file_suffix))) != -1) {
		_malloc_printf(ASL_LEVEL_INFO, "stack logs being written into %s\n", __stack_log_file_path__);
		created_log_location = __stack_log_file_path__;
	} else {
		_malloc_printf(ASL_LEVEL_INFO, "unable to create stack logs at %s\n", stack_log_location);
		if (use_alternate_location) delete_logging_file(stack_log_reference_file);
		stack_log_reference_file[0] = '\0';
		stack_log_location[0] = '\0';
		__stack_log_file_path__[0] = '\0';
		created_log_location = NULL;
		return created_log_location;
	}

	// in the case where the user has specified an alternate location, drop a reference file
	// in /tmp with the suffix 'stack_log_link_suffix' (".link") and save the path of the
	// stack logging file there.
	bool use_alternate_link_location = false;
	if (use_alternate_location) {
		strlcpy(stack_log_reference_file, _PATH_TMP, (size_t)PATH_MAX);
		if (access(stack_log_reference_file, W_OK) == -1) {
			strlcpy(stack_log_reference_file, stack_log_location, (size_t)PATH_MAX);
			use_alternate_link_location = true;
		}
		strlcat(stack_log_reference_file, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(stack_log_reference_file, pid, (size_t)PATH_MAX);
		if (progname && progname[0] != '\0') {
			strlcat(stack_log_reference_file, ".", (size_t)PATH_MAX);
			strlcat(stack_log_reference_file, progname, (size_t)PATH_MAX);
		}
		if (!use_alternate_link_location)
			strlcat(stack_log_reference_file, ".XXXXXX", (size_t)PATH_MAX);
		strlcat(stack_log_reference_file, stack_log_link_suffix, (size_t)PATH_MAX);

		int link_file_descriptor = mkstemps(stack_log_reference_file, (int)strlen(stack_log_link_suffix));
		if (link_file_descriptor == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "unable to create stack reference file %s at %s\n",
					stack_log_reference_file, stack_log_location);
		} else {
			ssize_t written = write(link_file_descriptor, __stack_log_file_path__, strlen(__stack_log_file_path__));
			if (written < (ssize_t)strlen(__stack_log_file_path__)) {
				_malloc_printf(ASL_LEVEL_INFO, "unable to write to stack reference file %s at %s\n",
						stack_log_reference_file, stack_log_location);
			} else {
				const char *description_string = "\n(This is a reference file to the stack logs at the path above.)\n";
				write(link_file_descriptor, description_string, strlen(description_string));
			}
			close(link_file_descriptor);
		}
	}
	if (use_alternate_link_location) {
		_malloc_printf(ASL_LEVEL_INFO, "Please issue: cp %s %s\n", stack_log_reference_file, _PATH_TMP);
	}
	return created_log_location;
}

// Check to see if the log file is actually a reference to another location
static int
log_file_is_reference(char *log_location, char *out_reference_loc_buffer, size_t max_reference_path_size)
{
	if (log_location == NULL || log_location[0] == '\0') return 0;

	size_t log_len = strlen(log_location);
	size_t link_suffix_len = strlen(stack_log_link_suffix);
	if (log_len < link_suffix_len || strncmp(log_location+log_len-link_suffix_len, stack_log_link_suffix, link_suffix_len) != 0) {
		// not a reference file.
		return 0;
	}

	if (!out_reference_loc_buffer || max_reference_path_size == 0) return 1;

	FILE *reference_file = fopen(log_location, "r");
	if (reference_file == NULL) {
		// if unable to open the file, it may be because another user created it; no need to warn.
		out_reference_loc_buffer[0] = '\0';
		return 1;
	}

	char *ret = fgets(out_reference_loc_buffer, (int)max_reference_path_size, reference_file);
	if (!ret) {
		out_reference_loc_buffer[0] = '\0';
		_malloc_printf(ASL_LEVEL_INFO, "unable to read from stack logging reference file at %s\n", log_location);
		fclose(reference_file);
		return 1;
	} else {
		size_t read_line_len = strlen(out_reference_loc_buffer);
		if (read_line_len >= 1 && out_reference_loc_buffer[read_line_len-1] == '\n') {
			out_reference_loc_buffer[read_line_len-1] = '\0';
		}
	}

	fclose(reference_file);

	return 1;
}
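
// For example (paths hypothetical): a reference file named
//   /tmp/stack-logs.1234.Mail.a1b2c3.link
// whose first line reads "/AppSandbox/tmp/stack-logs.1234.Mail.index" makes this
// function return 1 with that path copied into out_reference_loc_buffer.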

// This function may be called from the target process when exiting, or from either the target process or
// a stack log analysis process when reaping orphaned stack log files.
// Returns -1 if the files exist and they couldn't be removed, returns 0 otherwise.
static int
delete_logging_file(char *log_location)
{
	if (log_location == NULL || log_location[0] == '\0') return 0;

	struct stat statbuf;
	if (unlink(log_location) != 0 && stat(log_location, &statbuf) == 0) {
		return -1;
	}
	return 0;
}

// This function will be called from atexit() in the target process.
static void
delete_log_files(void)
{
	if (__stack_log_file_path__ && __stack_log_file_path__[0]) {
		if (delete_logging_file(__stack_log_file_path__) == 0) {
			_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", __stack_log_file_path__);
			__stack_log_file_path__[0] = '\0';
		} else {
			_malloc_printf(ASL_LEVEL_INFO, "unable to delete stack logs from %s\n", __stack_log_file_path__);
		}
	}
	if (stack_log_reference_file && stack_log_reference_file[0]) {
		delete_logging_file(stack_log_reference_file);
	}
}

static bool
is_process_running(pid_t pid)
{
	struct kinfo_proc kpt[1];
	size_t size = sizeof(struct kinfo_proc);
	int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};

	sysctl(mib, 4, kpt, &size, NULL, (size_t)0);	// size is set to sizeof(struct kinfo_proc) for an existing pid, 0 otherwise

	return (size == sizeof(struct kinfo_proc));
}

// The log files can be quite large and aren't too useful after the process that created them no longer exists.
// Normally they should get removed when the process exits, but if the process crashed the log files might remain.
// So, reap any stack log files for processes that no longer exist.
//
// If the remove_for_this_pid flag is set, then any log files that already exist for the current process will also be deleted.
// Those log files are probably the result of this process having been exec'ed from another one (without a fork()).
// The remove_for_this_pid flag is only set for a target process (one just starting logging); a stack logging "client"
// process reaps log files too, but if we're using stack logging on the client process itself, then we don't want to remove
// its own log files.
static void
reap_orphaned_log_files(bool remove_for_this_pid)
{
	DIR *dp;
	struct dirent *entry;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];
	pid_t current_pid = getpid();

	if ((dp = opendir(_PATH_TMP)) == NULL) {
		return;
	}

	strlcpy(prefix_name, stack_log_file_base_name, (size_t)PATH_MAX);
	size_t prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( entry->d_type != DT_DIR && entry->d_type != DT_LNK && ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) ) {
			long pid = strtol(&entry->d_name[prefix_length], (char **)NULL, 10);
			if ( (! is_process_running((pid_t)pid)) || (remove_for_this_pid && (pid_t)pid == current_pid) ) {
				strlcpy(pathname, _PATH_TMP, (size_t)PATH_MAX);
				strlcat(pathname, entry->d_name, (size_t)PATH_MAX);
				char reference_file_buffer[PATH_MAX];
				bool pathname_is_ref_file = false;
				if (log_file_is_reference(pathname, reference_file_buffer, (size_t)PATH_MAX) && *reference_file_buffer) {
					pathname_is_ref_file = true;
					if (delete_logging_file(reference_file_buffer) == 0) {
						if (remove_for_this_pid && pid == current_pid) {
							_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", reference_file_buffer);
						} else {
							_malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, reference_file_buffer);
						}
					}
				}
				if (delete_logging_file(pathname) == 0) {
					if (remove_for_this_pid && pid == current_pid) {
						if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", pathname);
					} else {
						if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, pathname);
					}
					char shmem_name_string[PATH_MAX];
					strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
					append_int(shmem_name_string, (pid_t)pid, (size_t)PATH_MAX);
					if (pid != current_pid) shm_unlink(shmem_name_string);
				}
			}
		}
	}
	closedir(dp);
}

/*
 * Since there are many errors that could cause stack logging to get disabled, this is a convenience method
 * for disabling any future logging in this process and for informing the user.
 */
static void
disable_stack_logging(void)
{
	_malloc_printf(ASL_LEVEL_INFO, "stack logging disabled due to previous errors.\n");
	stack_logging_enable_logging = 0;
	malloc_logger = NULL;
}

/* A wrapper around write() that will try to reopen the index/stack file and
 * write to it if someone closed it underneath us (e.g. the process we just
 * started decided to close all file descriptors except stdin/stdout/stderr). Some
 * programs like to do that and calling abort() on them is rude.
 */
static ssize_t
robust_write(int fd, const void *buf, size_t nbyte) {
	ssize_t written = write(fd, buf, nbyte);
	if (written == -1 && errno == EBADF) {
		char *file_to_reopen = NULL;
		int *fd_to_reset = NULL;

		// descriptor was closed on us. We need to reopen it
		if (fd == index_file_descriptor) {
			file_to_reopen = __stack_log_file_path__;
			fd_to_reset = &index_file_descriptor;
		} else {
			// We don't know about this file. Return (and abort()).
			_malloc_printf(ASL_LEVEL_INFO, "Unknown file descriptor; expecting stack logging index file\n");
			return -1;
		}

		// The file *should* already exist. If not, fail.
		fd = open(file_to_reopen, O_WRONLY | O_APPEND);
		if (fd < 3) {
			// If we somehow got stdin/out/err, we need to relinquish them and
			// get another fd.
			int fds_to_close[3] = { 0 };
			while (fd < 3) {
				if (fd == -1) {
					_malloc_printf(ASL_LEVEL_INFO, "unable to re-open stack logging file %s\n", file_to_reopen);
					delete_log_files();
					return -1;
				}
				fds_to_close[fd] = 1;
				fd = dup(fd);
			}

			// We have an fd we like. Close the ones we opened.
			if (fds_to_close[0]) close(0);
			if (fds_to_close[1]) close(1);
			if (fds_to_close[2]) close(2);
		}

		*fd_to_reset = fd;
		written = write(fd, buf, nbyte);
	}
	return written;
}

static void
flush_data(void)
{
	ssize_t written;	// signed size_t
	size_t remaining;
	char * p;

	if (index_file_descriptor == -1) {
		if (create_log_file() == NULL) {
			return;
		}
	}

	// Write the events before the index so that hopefully the events will be on disk if the index refers to them.
	p = pre_write_buffers->index_buffer;
	remaining = (size_t)pre_write_buffers->next_free_index_buffer_offset;
	while (remaining > 0) {
		written = robust_write(index_file_descriptor, p, remaining);
		if (written == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n",
					__stack_log_file_path__, strerror(errno));
			disable_stack_logging();
			return;
		}
		p += written;
		remaining -= written;
	}

	pre_write_buffers->start_index_offset += pre_write_buffers->next_free_index_buffer_offset;
	pre_write_buffers->next_free_index_buffer_offset = 0;
}

static void
prepare_to_log_stacks(void)
{
	if (!pre_write_buffers) {
		last_logged_malloc_address = 0ul;
		logging_use_compaction = (stack_logging_dontcompact ? 0 : logging_use_compaction);

		// Create a shared memory region to hold the pre-write index and stack buffers. This will allow remote analysis processes to access
		// these buffers to get logs for even the most recent allocations. The remote process will need to pause this process to ensure that
		// the contents of these buffers don't change while being inspected.
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, getpid(), (size_t)PATH_MAX);

		int shmid = shm_open(shmem_name_string, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
		if (shmid < 0) {
			// Failed to create shared memory region; turn off stack logging.
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		size_t full_shared_mem_size = sizeof(stack_buffer_shared_memory);
		ftruncate(shmid, (off_t)full_shared_mem_size);
		pre_write_buffers = (stack_buffer_shared_memory*)mmap(0, full_shared_mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
		close(shmid);

		if (MAP_FAILED == pre_write_buffers) {
			_malloc_printf(ASL_LEVEL_INFO, "error mapping in shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		// Store and use the buffer offsets in shared memory so that they can be accessed remotely
		pre_write_buffers->start_index_offset = 0ull;
		pre_write_buffers->next_free_index_buffer_offset = 0;

		// create the backtrace uniquing table
		pre_write_buffers->uniquing_table = __create_uniquing_table();
		pre_write_buffers->uniquing_table_address = (mach_vm_address_t)(uintptr_t)pre_write_buffers->uniquing_table;
		if (!pre_write_buffers->uniquing_table) {
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating stack uniquing table\n");
			disable_stack_logging();
			return;
		}

		stack_buffer = (vm_address_t*)allocate_pages((uint64_t)round_page(sizeof(vm_address_t) * STACK_LOGGING_MAX_STACK_SIZE));
		if (!stack_buffer) {
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating stack trace buffer\n");
			disable_stack_logging();
			return;
		}

		// malloc() can be called by the following, so these need to be done outside the stack_logging_lock but after the buffers have been set up.
		atexit(delete_log_files);	// atexit() can call malloc()
		reap_orphaned_log_files(true);	// this calls opendir() which calls malloc()

		// this call ensures that the log files exist; analyzing processes will rely on this assumption.
		if (create_log_file() == NULL) {
			disable_stack_logging();
			return;
		}
	}
}

void
__disk_stack_logging_log_stack(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
{
	if (!stack_logging_enable_logging) return;

	// check incoming data
	if (type_flags & stack_logging_type_alloc && type_flags & stack_logging_type_dealloc) {
		uintptr_t swapper = size;
		size = ptr_arg;
		ptr_arg = swapper;
		if (ptr_arg == return_val) return;	// realloc had no effect, skipping

		if (ptr_arg == 0) {	// realloc(NULL, size) same as malloc(size)
			type_flags ^= stack_logging_type_dealloc;
		} else {
			// realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
			__disk_stack_logging_log_stack(stack_logging_type_dealloc, zone_ptr, ptr_arg, (uintptr_t)0, (uintptr_t)0, num_hot_to_skip + 1);
			__disk_stack_logging_log_stack(stack_logging_type_alloc, zone_ptr, size, (uintptr_t)0, return_val, num_hot_to_skip + 1);
			return;
		}
	}
	if (type_flags & stack_logging_type_dealloc) {
		if (size) {
			ptr_arg = size;
			size = 0;
		} else return;	// free(nil)
	}
	if (type_flags & stack_logging_type_alloc && return_val == 0) return;	// alloc that failed

	type_flags &= 0x7;

	// now actually begin
	prepare_to_log_stacks();

	// since there could have been a fatal (to stack logging) error such as the log files not being created, check this variable before continuing
	if (!stack_logging_enable_logging) return;
	vm_address_t self_thread = (vm_address_t)pthread_self();	// use pthread_self() rather than mach_thread_self() to avoid system call

	// lock and enter
	OSSpinLockLock(&stack_logging_lock);

	if (!stack_logging_enable_logging) {
		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// compaction
	if (last_logged_malloc_address && (type_flags & stack_logging_type_dealloc) && STACK_LOGGING_DISGUISE(ptr_arg) == last_logged_malloc_address) {
		// *waves hand* the last allocation never occurred
		pre_write_buffers->next_free_index_buffer_offset -= (uint32_t)sizeof(stack_logging_index_event);
		last_logged_malloc_address = 0ul;

		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// gather stack
	uint32_t count;
	thread_stack_pcs(stack_buffer, STACK_LOGGING_MAX_STACK_SIZE-1, &count);	// only gather up to STACK_LOGGING_MAX_STACK_SIZE-1 since we append thread id
	stack_buffer[count++] = self_thread + 1;	// stuffing thread # in the coldest slot. Add 1 to match what the old stack logging did.
	num_hot_to_skip += 2;
	if (count <= num_hot_to_skip) {
		// Oops! Didn't get a valid backtrace from thread_stack_pcs().
		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// unique stack in memory
	count -= num_hot_to_skip;
#if __LP64__
	mach_vm_address_t *frames = (mach_vm_address_t*)stack_buffer + num_hot_to_skip;
#else
	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
	uint32_t i;
	for (i = 0; i < count; i++) {
		frames[i] = stack_buffer[i+num_hot_to_skip];
	}
#endif

	uint64_t uniqueStackIdentifier = (uint64_t)(-1ll);
	while (!__enter_frames_in_table(pre_write_buffers->uniquing_table, &uniqueStackIdentifier, frames, (int32_t)count)) {
		__expand_uniquing_table(pre_write_buffers->uniquing_table);
	}

	stack_logging_index_event current_index;
	if (type_flags & stack_logging_type_alloc) {
		current_index.address = STACK_LOGGING_DISGUISE(return_val);
		current_index.argument = size;
		if (logging_use_compaction) {
			last_logged_malloc_address = current_index.address;	// disguised
		}
	} else {
		current_index.address = STACK_LOGGING_DISGUISE(ptr_arg);
		current_index.argument = 0ul;
		last_logged_malloc_address = 0ul;
	}
	current_index.offset_and_flags = STACK_LOGGING_OFFSET_AND_FLAGS(uniqueStackIdentifier, type_flags);

	// the following line is a good debugging tool for logging each allocation event as it happens.
	// malloc_printf("{0x%lx, %lld}\n", STACK_LOGGING_DISGUISE(current_index.address), uniqueStackIdentifier);

	// flush the data buffer to disk if necessary
	if (pre_write_buffers->next_free_index_buffer_offset + sizeof(stack_logging_index_event) >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
		flush_data();
	}

	// store bytes in buffers
	memcpy(pre_write_buffers->index_buffer+pre_write_buffers->next_free_index_buffer_offset, &current_index, sizeof(stack_logging_index_event));
	pre_write_buffers->next_free_index_buffer_offset += (uint32_t)sizeof(stack_logging_index_event);

	OSSpinLockUnlock(&stack_logging_lock);
}

void
__stack_logging_fork_prepare() {
	OSSpinLockLock(&stack_logging_lock);
}

void
__stack_logging_fork_parent() {
	OSSpinLockUnlock(&stack_logging_lock);
}

void
__stack_logging_fork_child() {
	malloc_logger = NULL;
	stack_logging_enable_logging = 0;
	OSSpinLockUnlock(&stack_logging_lock);
}

boolean_t
__stack_logging_locked()
{
	bool acquired_lock = OSSpinLockTry(&stack_logging_lock);
	if (acquired_lock) OSSpinLockUnlock(&stack_logging_lock);
	return (acquired_lock ? false : true);
}

#pragma mark -
#pragma mark Remote Stack Log Access

#pragma mark - Design notes:

/*

this first one will look through the index, find the "stack_identifier" (i.e. the offset in the log file), and call the third function listed here.
extern kern_return_t __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *num_frames);
    // Gets the last allocation record about address

if !address, will load index and iterate through (expensive)
else will load just index, search for stack, and then use third function here to retrieve. (also expensive)
extern kern_return_t __mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context);
    // Applies enumerator to all records involving address sending context as enumerator's second parameter; if !address, applies enumerator to all records

this function will load the stack file, look for the stack, and follow up to STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY references to reconstruct.
extern kern_return_t __mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count);
    // Given a uniqued_stack fills stack_frames_buffer

*/
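
/*
 A minimal client-side sketch of the first call above (task acquisition and error
 handling elided; MAX_FRAMES is a hypothetical caller-chosen bound):

	mach_vm_address_t frames[MAX_FRAMES];
	uint32_t num_frames = 0;
	if (__mach_stack_logging_get_frames(task, block_address, frames, MAX_FRAMES, &num_frames) == KERN_SUCCESS) {
		// frames[0 .. num_frames-1] now holds the PCs recorded for the last
		// allocation event at block_address, hottest frame first.
	}
 */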

#pragma mark - caching

__attribute__((always_inline)) static inline size_t
hash_index(uint64_t address, size_t max_pos) {
	return (size_t)((address >> 2) % (max_pos-1));	// simplicity rules.
}

__attribute__((always_inline)) static inline size_t
hash_multiplier(size_t capacity, uint32_t allowed_collisions) {
	return (capacity/(allowed_collisions*2+1));
}

__attribute__((always_inline)) static inline size_t
next_hash(size_t hash, size_t multiplier, size_t capacity, uint32_t collisions) {
	hash += multiplier * collisions;
	if (hash >= capacity) hash -= capacity;
	return hash;
}
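
// For illustration: with multiplier m and capacity C, successive probes for a key
// visit pos, pos + m, pos + 3m, pos + 6m, ... (mod C). The step grows with the
// collision count, so entries that collide once tend not to keep colliding.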

static void
transfer_node(remote_index_cache *cache, remote_index_node *old_node)
{
	uint32_t collisions = 0;
	size_t pos = hash_index(old_node->address, cache->cache_node_capacity);
	size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
	do {
		if (cache->table_memory[pos].address == old_node->address) {	// a hit like this shouldn't happen.
			fprintf(stderr, "impossible collision! two address==address lists! (transfer_node)\n");
			break;
		} else if (cache->table_memory[pos].address == 0) {	// empty
			cache->table_memory[pos] = *old_node;
			break;
		} else {
			collisions++;
			pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);
		}
	} while (collisions <= cache->collision_allowance);

	if (collisions > cache->collision_allowance) {
		fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node)\n", sizeof(void*)*8);
	}
}

static void
expand_cache(remote_index_cache *cache)
{
	// keep old stats
	size_t old_node_capacity = cache->cache_node_capacity;
	remote_index_node *old_table = cache->table_memory;

	// quadruple the size (shifting by 2 multiplies capacity by 4)
	cache->cache_size <<= 2;
	cache->cache_node_capacity <<= 2;
	cache->collision_allowance += 3;
	cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));

	// repopulate (expensive!)
	size_t i;
	for (i = 0; i < old_node_capacity; i++) {
		if (old_table[i].address) {
			transfer_node(cache, &old_table[i]);
		}
	}
	free(old_table);
	// printf("cache expanded to %0.2f mb (eff: %3.0f%%, capacity: %lu, nodes: %llu, llnodes: %llu)\n", ((float)(cache->cache_size))/(1 << 20), ((float)(cache->cache_node_count)*100.0)/((float)(cache->cache_node_capacity)), cache->cache_node_capacity, cache->cache_node_count, cache->cache_llnode_count);
}

static void
insert_node(remote_index_cache *cache, uint64_t address, uint64_t index_file_offset)
{
	uint32_t collisions = 0;
	size_t pos = hash_index(address, cache->cache_node_capacity);
	size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);

	bool inserted = false;
	while (!inserted) {
		if (cache->table_memory[pos].address == 0ull || cache->table_memory[pos].address == address) {	// hit or empty
			cache->table_memory[pos].address = address;
			cache->table_memory[pos].index_file_offset = index_file_offset;
			inserted = true;
			break;
		}

		collisions++;
		pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);

		if (collisions > cache->collision_allowance) {
			expand_cache(cache);
			pos = hash_index(address, cache->cache_node_capacity);
			multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
			collisions = 0;
		}
	}
}

static void
update_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	remote_index_cache *cache = descriptors->cache;

	// create from scratch if necessary.
	if (!cache) {
		descriptors->cache = cache = (remote_index_cache*)calloc((size_t)1, sizeof(remote_index_cache));
		cache->cache_node_capacity = 1 << 14;
		cache->collision_allowance = 17;
		cache->last_index_file_offset = 0;
		cache->cache_size = cache->cache_node_capacity*sizeof(remote_index_node);
		cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));

		// now map in the shared memory, if possible
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, descriptors->remote_pid, (size_t)PATH_MAX);

		int shmid = shm_open(shmem_name_string, O_RDWR, S_IRUSR | S_IWUSR);
		if (shmid >= 0) {
			cache->shmem = mmap(0, sizeof(stack_buffer_shared_memory), PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
			close(shmid);
		}

		if (shmid < 0 || cache->shmem == MAP_FAILED) {
			// failed to connect to the shared memory region; warn and continue.
			_malloc_printf(ASL_LEVEL_INFO, "warning: unable to connect to remote process' shared memory; allocation histories may not be up-to-date.\n");
		}
	}

	// suspend and see how much updating there is to do. there are three scenarios, listed below
	bool update_snapshot = false;
	if (descriptors->remote_task != mach_task_self()) {
		task_suspend(descriptors->remote_task);
	}

	struct stat file_statistics;
	fstat(fileno(descriptors->index_file_stream), &file_statistics);
	size_t read_size = (descriptors->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	uint64_t read_this_update = 0;

	// the number of new index records to read (delta_indices) depends on which of three cases applies:
	// 1. there is no shared memory (or we can't connect); diff the last_index_file_offset from the filesize.
	// 2. the only updates have been in shared memory; the disk file didn't change at all. delta_indices should be zero, scan snapshot only.
	// 3. the updates have flushed to disk, meaning that most likely there is new data on disk that wasn't read from shared memory.
	//    correct delta_indices for the pre-scanned amount and read the new data from disk and shmem.
	uint64_t delta_indices = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
	uint32_t last_snapshot_scan_index = 0;
	if (delta_indices && cache->shmem) {
		// case 3: add cache scanned to known from disk and recalc
		cache->last_index_file_offset += cache->snapshot.next_free_index_buffer_offset;
		delta_indices = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
		update_snapshot = true;
	} else if (cache->shmem) {
		// case 2: set the last snapshot scan count so we don't rescan something we've seen.
		last_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
	}

	// no update necessary for the file; check if we need a snapshot.
	if (delta_indices == 0) {
		if (cache->shmem && !update_snapshot) {
			update_snapshot = (cache->shmem->next_free_index_buffer_offset != cache->snapshot.next_free_index_buffer_offset);
		}
	}

	// if a snapshot is necessary, memcpy from remote frozen process' memory
	// note: there were two ways to do this - spin lock or suspend. suspend allows us to
	// analyze processes even if they were artificially suspended. with a lock, there'd be
	// worry that the target was suspended with the lock taken.
	if (update_snapshot) {
		memcpy(&cache->snapshot, cache->shmem, sizeof(stack_buffer_shared_memory));
		// also need to update our version of the remote uniquing table
		vm_address_t local_uniquing_address = 0ul;
		mach_msg_type_number_t local_uniquing_size = 0;
		mach_vm_size_t desired_size = round_page(sizeof(backtrace_uniquing_table));
		kern_return_t err;
		if ((err = mach_vm_read(descriptors->remote_task, cache->shmem->uniquing_table_address, desired_size, &local_uniquing_address, &local_uniquing_size)) != KERN_SUCCESS
			|| local_uniquing_size != desired_size) {
			fprintf(stderr, "error while attempting to mach_vm_read remote stack uniquing table (%d): %s\n", err, mach_error_string(err));
		} else {
			// the mach_vm_read was successful, so acquire the uniquing table

			// need to re-read the table, so deallocate the current memory
			if (cache->uniquing_table.table) mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)(cache->uniquing_table.table), cache->uniquing_table.tableSize);

			// the following line gathers the uniquing table structure data, but the actual table memory is invalid since it's a pointer from the
			// remote process. this pointer will be mapped shared in a few lines.
			cache->uniquing_table = *((backtrace_uniquing_table*)local_uniquing_address);

			vm_address_t local_table_address = 0ul;
			mach_msg_type_number_t local_table_size = 0;

			err = mach_vm_read(descriptors->remote_task, cache->uniquing_table.table_address, cache->uniquing_table.tableSize, &local_table_address, &local_table_size);
			if (err == KERN_SUCCESS) cache->uniquing_table.table = (mach_vm_address_t*)local_table_address;
			else cache->uniquing_table.table = NULL;

			mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)local_uniquing_address, (mach_vm_size_t)local_uniquing_size);
		}
	}

	// resume
	if (descriptors->remote_task != mach_task_self()) {
		task_resume(descriptors->remote_task);
	}

	if (!update_snapshot && delta_indices == 0) return;	// absolutely no updating needed.

	FILE *the_index = (descriptors->index_file_stream);

	// prepare for the read; target process could be 32 or 64 bit.

	stack_logging_index_event32 *target_32_index = NULL;
	stack_logging_index_event64 *target_64_index = NULL;

	// perform the update from the file
	uint32_t i;
	if (delta_indices) {
		char bufferSpace[4096];	// 4 kb
		target_32_index = (stack_logging_index_event32*)bufferSpace;
		target_64_index = (stack_logging_index_event64*)bufferSpace;
		size_t number_slots = (size_t)(4096/read_size);

		size_t read_count = 0;
		if (fseeko(the_index, (off_t)(cache->last_index_file_offset), SEEK_SET)) {
			fprintf(stderr, "error while attempting to cache information from remote stack index file. (update_cache_for_file_streams)\n");
		}
		off_t current_index_position = cache->last_index_file_offset;
		do {
			number_slots = (size_t)MIN(delta_indices - read_this_update, number_slots);
			read_count = fread(bufferSpace, read_size, number_slots, the_index);
			if (descriptors->task_is_64_bit) {
				for (i = 0; i < read_count; i++) {
					insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			} else {
				for (i = 0; i < read_count; i++) {
					insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			}
		} while (read_count);

		if (read_this_update < delta_indices) {
			fprintf(stderr, "insufficient data in remote stack index file; expected more records.\n");
		}
		cache->last_index_file_offset += read_this_update * read_size;
	}

	if (update_snapshot) {
		target_32_index = (stack_logging_index_event32*)(cache->snapshot.index_buffer);
		target_64_index = (stack_logging_index_event64*)(cache->snapshot.index_buffer);

		uint32_t free_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
		off_t current_index_position = cache->snapshot.start_index_offset;
		if (descriptors->task_is_64_bit) {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		} else {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		}
	}
}

static void
destroy_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	if (descriptors->cache->shmem) {
		munmap(descriptors->cache->shmem, sizeof(stack_buffer_shared_memory));
	}
	free(descriptors->cache->table_memory);
	free(descriptors->cache);
	descriptors->cache = NULL;
}

#pragma mark - internal

// In the stack log analysis process, find the stack logging files for target process <pid>
// by scanning the temporary directory for directory entries with names of the form "stack-logs.<pid>."
// If we find such a directory then open the stack logging files in there.
// We might also have been passed the file path if the client first read it from the __stack_log_file_path__
// global variable in the target task, as will be needed if the .link cannot be put in /tmp.
static void
open_log_files(pid_t pid, char* file_path, remote_task_file_streams *this_task_streams)
{
	DIR *dp;
	struct dirent *entry;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];

	reap_orphaned_log_files(false);	// reap any left-over log files (for non-existent processes, but not for this analysis process)

	if (file_path != NULL) {
		this_task_streams->index_file_stream = fopen(file_path, "r");
		return;
	}

	if ((dp = opendir(_PATH_TMP)) == NULL) {
		return;
	}

	// It's OK to use snprintf in this routine since it should only be called by the clients
	// of stack logging, and thus calls to malloc are OK.
	snprintf(prefix_name, (size_t)PATH_MAX, "%s%d.", stack_log_file_base_name, pid);	// make sure to use "%s%d." rather than just "%s%d" to match the whole pid
	size_t prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) {
			snprintf(pathname, (size_t)PATH_MAX, "%s%s", _PATH_TMP, entry->d_name);
			char reference_file[PATH_MAX];
			if (log_file_is_reference(pathname, reference_file, (size_t)PATH_MAX)) {
				this_task_streams->index_file_stream = fopen(reference_file, "r");
			} else {
				this_task_streams->index_file_stream = fopen(pathname, "r");
			}

			break;
		}
	}
	closedir(dp);
}
1328
static remote_task_file_streams*
retain_file_streams_for_task(task_t task, char* file_path)
{
	if (task == MACH_PORT_NULL) return NULL;

	OSSpinLockLock(&remote_fd_list_lock);

	// see if they're already in use
	uint32_t i = 0;
	for (i = 0; i < remote_task_fd_count; i++) {
		if (remote_fds[i].remote_task == task) {
			remote_fds[i].in_use_count++;
			OSSpinLockUnlock(&remote_fd_list_lock);
			return &remote_fds[i];
		}
	}

	// open them
	uint32_t failures = 0;
	if (remote_task_fd_count == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
		while (remote_fds[next_remote_task_fd].in_use_count > 0) {
			next_remote_task_fd++;
			if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
			failures++;
			if (failures >= STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
				OSSpinLockUnlock(&remote_fd_list_lock);
				return NULL;
			}
		}
		fclose(remote_fds[next_remote_task_fd].index_file_stream);
		destroy_cache_for_file_streams(&remote_fds[next_remote_task_fd]);
	}

	pid_t pid;
	kern_return_t err = pid_for_task(task, &pid);
	if (err != KERN_SUCCESS) {
		OSSpinLockUnlock(&remote_fd_list_lock);
		return NULL;
	}

	remote_task_file_streams *this_task_streams = &remote_fds[next_remote_task_fd];

	open_log_files(pid, file_path, this_task_streams);

	// check if the open failed
	if (this_task_streams->index_file_stream == NULL) {
		OSSpinLockUnlock(&remote_fd_list_lock);
		return NULL;
	}

	// check if target pid is running 64-bit
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
	struct kinfo_proc processInfo;
	size_t bufsize = sizeof(processInfo);
	if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, (size_t)0) == 0 && bufsize > 0) {
		this_task_streams->task_is_64_bit = processInfo.kp_proc.p_flag & P_LP64;
	} else {
		this_task_streams->task_is_64_bit = 0;
	}

	// otherwise set vars and go
	this_task_streams->in_use_count = 1;
	this_task_streams->remote_task = task;
	this_task_streams->remote_pid = pid;
	next_remote_task_fd++;
	if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
	remote_task_fd_count = MIN(remote_task_fd_count + 1, STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED);

	OSSpinLockUnlock(&remote_fd_list_lock);
	return this_task_streams;
}

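// Drop one reference on the cached streams for this task. The streams are left
// open on purpose so a later retain can reuse them; they are only closed when
// their slot is reclaimed in retain_file_streams_for_task().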
static void
release_file_streams_for_task(task_t task)
{
	OSSpinLockLock(&remote_fd_list_lock);

	// decrement in-use count
	uint32_t i = 0;
	for (i = 0; i < remote_task_fd_count; i++) {
		if (remote_fds[i].remote_task == task) {
			remote_fds[i].in_use_count--;
			break;
		}
	}

	OSSpinLockUnlock(&remote_fd_list_lock);
}

#pragma mark - extern

//
// The following is used by client tools like malloc_history and Instruments to pass along the path
// of the index file as read from the target task's __stack_log_file_path__ variable (set in this file).
// Eventually, at a suitable point, this additional argument should just be added to the other APIs below.
//
kern_return_t
__mach_stack_logging_set_file_path(task_t task, char* file_path)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task, file_path);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
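
#if 0
// Illustrative sketch only, not part of this file: one way an analysis tool
// might drive the entry point above after reading __stack_log_file_path__ out
// of the target task. read_remote_log_path() is a hypothetical helper.
static void
example_attach(task_t target_task)
{
	char *path = read_remote_log_path(target_task);	// hypothetical
	if (path != NULL && __mach_stack_logging_set_file_path(target_task, path) == KERN_SUCCESS) {
		// subsequent calls below will reuse the cached, retained streams
	}
}
#endif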

kern_return_t
__mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task, NULL);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}

	update_cache_for_file_streams(remote_fd);

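	// Probe the cache's open-addressed hash table for the target address. Each
	// miss advances by next_hash(); the search gives up once the number of
	// collisions exceeds the table's collision allowance.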
	uint32_t collisions = 0;
	size_t hash = hash_index(address, remote_fd->cache->cache_node_capacity);
	size_t multiplier = hash_multiplier(remote_fd->cache->cache_node_capacity, remote_fd->cache->collision_allowance);
	uint64_t located_file_position = 0;

	bool found = false;
	do {
		if (remote_fd->cache->table_memory[hash].address == address) { // hit!
			located_file_position = remote_fd->cache->table_memory[hash].index_file_offset;
			found = true;
			break;
		} else if (remote_fd->cache->table_memory[hash].address == 0ull) { // failure!
			break;
		}

		collisions++;
		hash = next_hash(hash, multiplier, remote_fd->cache->cache_node_capacity, collisions);

	} while (collisions <= remote_fd->cache->collision_allowance);

	if (found) {
		// prepare for the read; target process could be 32 or 64 bit.
		stack_logging_index_event32 *target_32_index = NULL;
		stack_logging_index_event64 *target_64_index = NULL;

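		// The cache maps an address to the file offset of its most recent index
		// event. Reread that event (from the shared-memory snapshot if it hasn't
		// reached disk yet) to recover the uniqued stack offset packed into the
		// low 56 bits of offset_and_flags.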
		if (located_file_position >= remote_fd->cache->last_index_file_offset) {
			// must be in shared memory
			if (remote_fd->cache->shmem) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}

		} else {
			// it's written to disk
			char bufferSpace[128];

			size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
			fseeko(remote_fd->index_file_stream, (off_t)located_file_position, SEEK_SET);
			size_t read_count = fread(bufferSpace, read_size, (size_t)1, remote_fd->index_file_stream);
			if (read_count) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}
		}
	}

	release_file_streams_for_task(task);

	if (!found) {
		return KERN_FAILURE;
	}

	return __mach_stack_logging_frames_for_uniqued_stack(task, located_file_position, stack_frames_buffer, max_stack_frames, count);
}


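// Stream every index event recorded for the target task, invoking the supplied
// enumerator for each record whose disguised address matches, or for all
// records when address == 0. Events are read from the on-disk index in
// buffer-sized chunks; whatever had not yet reached disk when the cache was
// updated is picked up from the shared-memory snapshot.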
kern_return_t
__mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task, NULL);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}

	bool reading_all_addresses = (address == 0);
	mach_stack_logging_record_t pass_record;
	kern_return_t err = KERN_SUCCESS;

	// update (read index file once and only once)
	update_cache_for_file_streams(remote_fd);

	FILE *the_index = (remote_fd->index_file_stream);

	// prepare for the read; target process could be 32 or 64 bit.
	char bufferSpace[2048]; // 2 KB
	stack_logging_index_event32 *target_32_index = (stack_logging_index_event32*)bufferSpace;
	stack_logging_index_event64 *target_64_index = (stack_logging_index_event64*)bufferSpace;
	uint32_t target_addr_32 = (uint32_t)STACK_LOGGING_DISGUISE((uint32_t)address);
	uint64_t target_addr_64 = STACK_LOGGING_DISGUISE((uint64_t)address);
	size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	size_t number_slots = (size_t)(sizeof(bufferSpace)/read_size);
	uint64_t total_slots = remote_fd->cache->last_index_file_offset / read_size;

	// perform the search
	size_t read_count = 0;
	int64_t current_file_offset = 0;
	uint32_t i;
	do {
		// at this point, we need to read index events; read them from the file until it's necessary to grab them from the shared memory snapshot
		// and crop file reading to the point where we last scanned
		number_slots = (size_t)MIN(number_slots, total_slots);

		// if out of file to read (as of the time we entered this function), try to use shared memory snapshot
		if (number_slots == 0) {
			if (remote_fd->cache->shmem && remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset > (uint64_t)current_file_offset) {
				// use shared memory
				target_32_index = (stack_logging_index_event32*)remote_fd->cache->snapshot.index_buffer;
				target_64_index = (stack_logging_index_event64*)remote_fd->cache->snapshot.index_buffer;
				read_count = (uint32_t)(remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset - current_file_offset) / read_size;
				current_file_offset += read_count * read_size;
			} else {
				break;
			}
		} else {
			// get and save index (enumerator could modify)
			fseeko(the_index, current_file_offset, SEEK_SET);
			read_count = fread(bufferSpace, read_size, number_slots, the_index);
			current_file_offset = ftello(the_index);
			total_slots -= read_count;
		}

		if (remote_fd->task_is_64_bit) {
			for (i = 0; i < read_count; i++) {
				if (reading_all_addresses || target_64_index[i].address == target_addr_64) {
					pass_record.address = STACK_LOGGING_DISGUISE(target_64_index[i].address);
					pass_record.argument = target_64_index[i].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_64_index[i].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_64_index[i].offset_and_flags);
					enumerator(pass_record, context);
				}
			}
		} else {
			for (i = 0; i < read_count; i++) {
				if (reading_all_addresses || target_32_index[i].address == target_addr_32) {
					pass_record.address = STACK_LOGGING_DISGUISE(target_32_index[i].address);
					pass_record.argument = target_32_index[i].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_32_index[i].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_32_index[i].offset_and_flags);
					enumerator(pass_record, context);
				}
			}
		}
	} while (read_count);

	release_file_streams_for_task(task);
	return err;
}


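#if 0
// Illustrative sketch only, not part of this file: a minimal enumerator that a
// client such as malloc_history might pass to the routine above. The field
// names match mach_stack_logging_record_t as filled in by pass_record.
static void
print_record(mach_stack_logging_record_t record, void *context)
{
	fprintf(stderr, "type=0x%x address=0x%llx argument=%llu stack_id=%llu\n",
			record.type_flags, (uint64_t)record.address,
			(uint64_t)record.argument, (uint64_t)record.stack_identifier);
}
// e.g. __mach_stack_logging_enumerate_records(task, 0 /* all addresses */, print_record, NULL);
#endif

// Expand a uniqued stack identifier (the stack_identifier delivered above)
// back into individual frames by walking the backtrace uniquing table.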
kern_return_t
__mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task, NULL);
	if (remote_fd == NULL) return KERN_FAILURE;

	__unwind_stack_from_table_index(&remote_fd->cache->uniquing_table, stack_identifier, stack_frames_buffer, count, max_stack_frames);

	release_file_streams_for_task(task);

	if (*count) return KERN_SUCCESS;
	else return KERN_FAILURE;
}

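#if 0
// Illustrative sketch only, not part of this file: expanding one record
// delivered by the enumerator above into its individual frames.
static void
print_frames(task_t task, mach_stack_logging_record_t record)
{
	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
	uint32_t frame_count = 0;
	if (__mach_stack_logging_frames_for_uniqued_stack(task, record.stack_identifier,
			frames, STACK_LOGGING_MAX_STACK_SIZE, &frame_count) == KERN_SUCCESS) {
		uint32_t j;
		for (j = 0; j < frame_count; j++) fprintf(stderr, "  frame %u: 0x%llx\n", j, (uint64_t)frames[j]);
	}
}
#endif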

#ifdef TEST_DISK_STACK_LOGGING

// cc -o stack_logging_disk stack_logging_disk.c -DTEST_DISK_STACK_LOGGING

#include <sys/wait.h>

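// Test harness: prints the size of this file's file-scope state, then
// exercises log-file creation and reaping across children that exit cleanly
// and children that _exit() abruptly, leaving logs behind to be reaped.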
int
main()
{
	int status;
	int i;
	size_t total_globals = 0ul;

	fprintf(stderr, "master test process is %d\n", getpid());
	fprintf(stderr, "sizeof pre_write_buffers: %lu\n", sizeof(pre_write_buffers)); total_globals += sizeof(pre_write_buffers);
	fprintf(stderr, "sizeof stack_buffer: %lu\n", sizeof(stack_buffer)); total_globals += sizeof(stack_buffer);
	fprintf(stderr, "sizeof last_logged_malloc_address: %lu\n", sizeof(last_logged_malloc_address)); total_globals += sizeof(last_logged_malloc_address);
	fprintf(stderr, "sizeof stack_log_file_base_name: %lu\n", sizeof(stack_log_file_base_name)); total_globals += sizeof(stack_log_file_base_name);
	fprintf(stderr, "sizeof stack_log_file_suffix: %lu\n", sizeof(stack_log_file_suffix)); total_globals += sizeof(stack_log_file_suffix);
	fprintf(stderr, "sizeof stack_log_link_suffix: %lu\n", sizeof(stack_log_link_suffix)); total_globals += sizeof(stack_log_link_suffix);
	fprintf(stderr, "sizeof stack_log_location: %lu\n", (size_t)PATH_MAX); total_globals += (size_t)PATH_MAX;
	fprintf(stderr, "sizeof stack_log_reference_file: %lu\n", (size_t)PATH_MAX); total_globals += (size_t)PATH_MAX;
	fprintf(stderr, "sizeof __stack_log_file_path__ (index_file_path): %lu\n", (size_t)PATH_MAX); total_globals += (size_t)PATH_MAX;
	fprintf(stderr, "sizeof index_file_descriptor: %lu\n", sizeof(index_file_descriptor)); total_globals += sizeof(index_file_descriptor);
	fprintf(stderr, "sizeof remote_fds: %lu\n", sizeof(remote_fds)); total_globals += sizeof(remote_fds);
	fprintf(stderr, "sizeof next_remote_task_fd: %lu\n", sizeof(next_remote_task_fd)); total_globals += sizeof(next_remote_task_fd);
	fprintf(stderr, "sizeof remote_task_fd_count: %lu\n", sizeof(remote_task_fd_count)); total_globals += sizeof(remote_task_fd_count);
	fprintf(stderr, "sizeof remote_fd_list_lock: %lu\n", sizeof(remote_fd_list_lock)); total_globals += sizeof(remote_fd_list_lock);
	fprintf(stderr, "sizeof logging_use_compaction: %lu\n", sizeof(logging_use_compaction)); total_globals += sizeof(logging_use_compaction);

	fprintf(stderr, "size of all global data: %lu\n", total_globals);

	create_log_file();

	// create a few child processes and exit them cleanly so their logs get cleaned up
	fprintf(stderr, "\ncreating child processes and exiting cleanly\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_file();
			fprintf(stderr, "exiting child process %d\n", getpid());
			exit(1);
		}
		wait(&status);
	}

	// create a few child processes and abruptly _exit them, leaving their logs around
	fprintf(stderr, "\ncreating child processes and exiting abruptly, leaving logs around\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_file();
			fprintf(stderr, "exiting child process %d\n", getpid());
			_exit(1);
		}
		wait(&status);
	}

	// this should reap any remaining logs
	fprintf(stderr, "\nexiting master test process %d\n", getpid());
	delete_log_files();
	return 0;
}

#endif