/*
 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <pthread.h>
#include <paths.h>
#include <errno.h>
#include "stack_logging.h"
#include "malloc_printf.h"
#include "_simple.h"		// as included by malloc.c, this defines ASL_LEVEL_INFO

#pragma mark -
#pragma mark Defines

#ifdef TEST_DISK_STACK_LOGGING
#define _malloc_printf fprintf
#undef ASL_LEVEL_INFO
#define ASL_LEVEL_INFO stderr
#endif

#define STACK_LOGGING_MAX_STACK_SIZE 512
#define STACK_LOGGING_BLOCK_WRITING_SIZE 8192
#define STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED 3

#define BACKTRACE_UNIQUING_DEBUG 0
// The expansion factor controls the shifting up of table size. A factor of 1 will double the size upon expanding,
// 2 will quadruple the size, etc. Maintaining a 66% fill in an ideal table requires the collision allowance to
// increase by 3 for every quadrupling of the table size (though this only raises the constant c in the
// O(c*n) insertion cost).
#define EXPAND_FACTOR 2
#define COLLISION_GROWTH_RATE 3

// For a uniquing table, the useful node count is slots := floor(table_byte_size / (2 * sizeof(mach_vm_address_t)))
// Some useful numbers for the initial max collision value (desiring 66% fill):
// 16K-23K slots -> 16 collisions
// 24K-31K slots -> 17 collisions
// 32K-47K slots -> 18 collisions
// 48K-79K slots -> 19 collisions
// 80K-96K slots -> 20 collisions
#define INITIAL_MAX_COLLIDE	19
#define DEFAULT_UNIQUING_PAGE_SIZE 256
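// Worked example for the defaults above (assuming the common 4 KB vm_page_size):
// DEFAULT_UNIQUING_PAGE_SIZE (256) * 4096 bytes = a 1 MB table, giving
// floor(1 MB / (2 * sizeof(mach_vm_address_t))) = 65536 slots. That falls in the
// 48K-79K band of the table above, which is why INITIAL_MAX_COLLIDE is 19.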

#pragma mark -
#pragma mark Macros

#define STACK_LOGGING_FLAGS(longlongvar) (uint8_t)((uint64_t)(longlongvar) >> 56)
#define STACK_LOGGING_OFFSET(longlongvar) ((longlongvar) & 0x00FFFFFFFFFFFFFFull)
#define STACK_LOGGING_OFFSET_AND_FLAGS(longlongvar, realshortvar) (((uint64_t)(longlongvar) & 0x00FFFFFFFFFFFFFFull) | ((uint64_t)(realshortvar) << 56))
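// For example, STACK_LOGGING_OFFSET_AND_FLAGS(0x1234, 0x04) packs to 0x0400000000001234ull;
// STACK_LOGGING_OFFSET() then recovers 0x1234 and STACK_LOGGING_FLAGS() recovers 0x04.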

#pragma mark -
#pragma mark Types

typedef struct {
	uintptr_t	argument;
	uintptr_t	address;
	uint64_t	offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event;

typedef struct {
	uint32_t	argument;
	uint32_t	address;
	uint64_t	offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event32;

typedef struct {
	uint64_t	argument;
	uint64_t	address;
	uint64_t	offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event64;
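
// Note: these records are written to the index file as raw bytes, so their sizes are part of the
// on-disk format: sizeof(stack_logging_index_event32) == 16 and sizeof(stack_logging_index_event64) == 24.
// The remote reader below picks one of the two sizes based on the bitness of the target task.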

#pragma pack(push,4)
typedef struct {
	uint64_t	numPages;	// number of pages of the table
	uint64_t	numNodes;
	uint64_t	tableSize;
	uint64_t	untouchableNodes;
	mach_vm_address_t	table_address;
	int32_t		max_collide;
	// 'table_address' is just an always 64-bit version of the pointer-sized 'table' field to remotely read;
	// it's important that the offset of 'table_address' in the struct does not change between 32 and 64-bit.
#if BACKTRACE_UNIQUING_DEBUG
	uint64_t	nodesFull;
	uint64_t	backtracesContained;
#endif
	mach_vm_address_t	*table;	// allocated using vm_allocate()
} backtrace_uniquing_table;
#pragma pack(pop)

// for storing/looking up allocations that haven't yet been written to disk; consistent size across 32/64-bit processes.
// It's important that these fields don't change alignment due to the architecture because they may be accessed from an
// analyzing process with a different arch - hence the pragmas.
#pragma pack(push,4)
typedef struct {
	uint64_t	start_index_offset;
	uint32_t	next_free_index_buffer_offset;
	mach_vm_address_t	uniquing_table_address;
	char		index_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
	backtrace_uniquing_table	*uniquing_table;
} stack_buffer_shared_memory;
#pragma pack(pop)

// target process address -> record table (for __mach_stack_logging_get_frames)
typedef struct {
	uint64_t	address;
	uint64_t	index_file_offset;
} remote_index_node;

// for caching index information client-side:
typedef struct {
	size_t		cache_size;
	size_t		cache_node_capacity;
	uint32_t	collision_allowance;
	remote_index_node	*table_memory;	// this can be malloced; it's on the client side.
	stack_buffer_shared_memory	*shmem;	// shared memory
	stack_buffer_shared_memory	snapshot;	// memory snapshot of the remote process' shared memory
	uint32_t	last_pre_written_index_size;
	uint64_t	last_index_file_offset;
	backtrace_uniquing_table	uniquing_table;	// snapshot of the remote process' uniquing table
} remote_index_cache;

// for reading stack history information from remote processes:
typedef struct {
	task_t		remote_task;
	pid_t		remote_pid;
	int32_t		task_is_64_bit;
	int32_t		in_use_count;
	FILE		*index_file_stream;
	remote_index_cache	*cache;
} remote_task_file_streams;

#pragma mark -
#pragma mark Constants/Globals

static OSSpinLock stack_logging_lock = OS_SPINLOCK_INIT;

// support for multi-threaded forks
extern void __stack_logging_fork_prepare();
extern void __stack_logging_fork_parent();
extern void __stack_logging_fork_child();
extern void __stack_logging_early_finished();

// support for gdb and others checking for stack_logging locks
__private_extern__ boolean_t __stack_logging_locked();

// single-thread access variables
static stack_buffer_shared_memory *pre_write_buffers;
static vm_address_t *stack_buffer;
static uintptr_t last_logged_malloc_address = 0;

// Constants to define stack logging file path names.
// Files will get written as /tmp/stack-logs.<pid>.<progname>.XXXXXX.index
// unless the base directory is specified otherwise with MallocStackLoggingDirectory.
// In that case, a file /tmp/stack-logs.<pid>.<progname>.XXXXXX.link will also be created.
static const char *stack_log_file_base_name = "stack-logs.";
static const char *stack_log_file_suffix = ".index";
static const char *stack_log_link_suffix = ".link";

static void *stack_log_path_buffers = NULL;
static char *stack_log_location = NULL;
static char *stack_log_reference_file = NULL;
char *__stack_log_file_path__ = NULL;
static int index_file_descriptor = -1;

// for accessing remote log files
static remote_task_file_streams remote_fds[STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED];
static uint32_t next_remote_task_fd = 0;
static uint32_t remote_task_fd_count = 0;
static OSSpinLock remote_fd_list_lock = OS_SPINLOCK_INIT;

// activation variables
static int logging_use_compaction = 1;	// set this to zero to always disable compaction.

// If we encounter errors during file writing, we set malloc_logger to NULL
// to disable any further logging.
typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
extern malloc_logger_t *malloc_logger;
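
// For reference, malloc itself drives logging through the hook above; a call recording a plain
// malloc() looks roughly like this (a sketch -- the exact flag constants come from stack_logging.h):
//	malloc_logger(stack_logging_type_alloc, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)returned_pointer, 0);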

#pragma mark -
#pragma mark In-Memory Backtrace Uniquing

static __attribute__((always_inline))
inline void*
allocate_pages(uint64_t memSize)
{
	mach_vm_address_t allocatedMem = 0ull;
	if (mach_vm_allocate(mach_task_self(), &allocatedMem, memSize, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL)) != KERN_SUCCESS) {
		malloc_printf("allocate_pages(): virtual memory exhausted!\n");
	}
	return (void*)(uintptr_t)allocatedMem;
}

static __attribute__((always_inline))
inline int
deallocate_pages(void* memPointer, uint64_t memSize)
{
	return mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)memPointer, memSize);
}

static backtrace_uniquing_table*
__create_uniquing_table(void)
{
	backtrace_uniquing_table *uniquing_table = (backtrace_uniquing_table*)allocate_pages((uint64_t)round_page(sizeof(backtrace_uniquing_table)));
	if (!uniquing_table) return NULL;
	bzero(uniquing_table, sizeof(backtrace_uniquing_table));
	uniquing_table->numPages = DEFAULT_UNIQUING_PAGE_SIZE;
	uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
	uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1;	// make sure it's even.
	uniquing_table->table = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);
	uniquing_table->table_address = (uintptr_t)uniquing_table->table;
	uniquing_table->max_collide = INITIAL_MAX_COLLIDE;
	uniquing_table->untouchableNodes = 0;

#if BACKTRACE_UNIQUING_DEBUG
	malloc_printf("create_uniquing_table(): creating. size: %lldKB == %lldMB, numnodes: %lld (%lld untouchable)\n", uniquing_table->tableSize >> 10, uniquing_table->tableSize >> 20, uniquing_table->numNodes, uniquing_table->untouchableNodes);
	malloc_printf("create_uniquing_table(): table: %p; end: %p\n", uniquing_table->table, (void*)((uintptr_t)uniquing_table->table + (uintptr_t)uniquing_table->tableSize));
#endif
	return uniquing_table;
}

static void
__destroy_uniquing_table(backtrace_uniquing_table* table)
{
	deallocate_pages(table->table, table->tableSize);
	deallocate_pages(table, sizeof(backtrace_uniquing_table));
}

static void
__expand_uniquing_table(backtrace_uniquing_table *uniquing_table)
{
	mach_vm_address_t *oldTable = uniquing_table->table;
	uint64_t oldsize = uniquing_table->tableSize;
	uint64_t oldnumnodes = uniquing_table->numNodes;

	uniquing_table->numPages = uniquing_table->numPages << EXPAND_FACTOR;
	uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
	uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1;	// make sure it's even.
	mach_vm_address_t *newTable = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);

	uniquing_table->table = newTable;
	uniquing_table->table_address = (uintptr_t)uniquing_table->table;
	uniquing_table->max_collide = uniquing_table->max_collide + COLLISION_GROWTH_RATE;

	if (mach_vm_copy(mach_task_self(), (mach_vm_address_t)(uintptr_t)oldTable, oldsize, (mach_vm_address_t)(uintptr_t)newTable) != KERN_SUCCESS) {
		malloc_printf("expandUniquingTable(): VMCopyFailed\n");
	}
	uniquing_table->untouchableNodes = oldnumnodes;

#if BACKTRACE_UNIQUING_DEBUG
	malloc_printf("expandUniquingTable(): expanded from nodes full: %lld of: %lld (~%2d%%); to nodes: %lld (inactive = %lld); unique bts: %lld\n",
				  uniquing_table->nodesFull, oldnumnodes, (int)(((uniquing_table->nodesFull * 100.0) / (double)oldnumnodes) + 0.5),
				  uniquing_table->numNodes, uniquing_table->untouchableNodes, uniquing_table->backtracesContained);
	malloc_printf("expandUniquingTable(): allocate: %p; end: %p\n", newTable, (void*)((uintptr_t)newTable + (uintptr_t)(uniquing_table->tableSize)));
	malloc_printf("expandUniquingTable(): deallocate: %p; end: %p\n", oldTable, (void*)((uintptr_t)oldTable + (uintptr_t)oldsize));
#endif

	if (deallocate_pages(oldTable, oldsize) != KERN_SUCCESS) {
		malloc_printf("expandUniquingTable(): mach_vm_deallocate failed. [%p]\n", uniquing_table->table);
	}
}

static int
__enter_frames_in_table(backtrace_uniquing_table *uniquing_table, uint64_t *foundIndex, mach_vm_address_t *frames, int32_t count)
{
	// The hash values need to be the same size as the addresses (because we use the value -1); for clarity, define a new type.
	typedef mach_vm_address_t hash_index_t;

	mach_vm_address_t thisPC;
	hash_index_t hash, uParent = (hash_index_t)(-1ll), modulus = (uniquing_table->numNodes-uniquing_table->untouchableNodes-1);
	int32_t collisions, lcopy = count, returnVal = 1;
	hash_index_t hash_multiplier = ((uniquing_table->numNodes - uniquing_table->untouchableNodes)/(uniquing_table->max_collide*2+1));
	mach_vm_address_t *node;
	while (--lcopy >= 0) {
		thisPC = frames[lcopy];

		// hash = initialHash(uniquing_table, uParent, thisPC);
		hash = uniquing_table->untouchableNodes + (((uParent << 4) ^ (thisPC >> 2)) % modulus);
		collisions = uniquing_table->max_collide;

		while (collisions--) {
			node = uniquing_table->table + (hash * 2);

			if (*node == 0 && node[1] == 0) {
				// blank; store this entry!
				// Note that we need to test for both node[0] and node[1] as (0, -1) is a valid entry
				node[0] = thisPC;
				node[1] = uParent;
				uParent = hash;
#if BACKTRACE_UNIQUING_DEBUG
				uniquing_table->nodesFull++;
				if (lcopy == 0) {
					uniquing_table->backtracesContained++;
				}
#endif
				break;
			}
			if (*node == thisPC && node[1] == uParent) {
				// hit! retrieve index and go.
				uParent = hash;
				break;
			}

			hash += collisions * hash_multiplier + 1;

			if (hash >= uniquing_table->numNodes) {
				hash -= (uniquing_table->numNodes - uniquing_table->untouchableNodes);	// wrap around.
			}
		}

		if (collisions < 0) {
			returnVal = 0;
			break;
		}
	}

	if (returnVal) *foundIndex = uParent;

	return returnVal;
}

static void
__unwind_stack_from_table_index(backtrace_uniquing_table *uniquing_table, uint64_t index_pos, mach_vm_address_t *out_frames_buffer, uint32_t *out_frames_count, uint32_t max_frames)
{
	mach_vm_address_t *node = uniquing_table->table + (index_pos * 2);
	uint32_t foundFrames = 0;
	if (index_pos < uniquing_table->numNodes) {
		while (foundFrames < max_frames) {
			out_frames_buffer[foundFrames++] = node[0];
			if (node[1] == (mach_vm_address_t)(-1ll)) break;
			node = uniquing_table->table + (node[1] * 2);
		}
	}

	*out_frames_count = foundFrames;
}
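
// A minimal round-trip sketch of the two routines above (illustrative only, not compiled in;
// the PC values are hypothetical). Entering the same frames again would return the same identifier.
#if 0
static void
__uniquing_table_round_trip_example(void)
{
	backtrace_uniquing_table *table = __create_uniquing_table();
	mach_vm_address_t frames[3] = { 0x1000, 0x2000, 0x3000 };	// hottest frame first
	uint64_t stack_id = (uint64_t)(-1ll);
	while (!__enter_frames_in_table(table, &stack_id, frames, 3)) {
		__expand_uniquing_table(table);	// same retry pattern __disk_stack_logging_log_stack() uses
	}
	mach_vm_address_t out_frames[8];
	uint32_t out_count = 0;
	__unwind_stack_from_table_index(table, stack_id, out_frames, &out_count, 8);
	// out_count == 3; out_frames[] == { 0x1000, 0x2000, 0x3000 }
	__destroy_uniquing_table(table);
}
#endif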

#pragma mark -
#pragma mark Disk Stack Logging

static void delete_log_files(void);		// pre-declare
static int delete_logging_file(char *log_location);

static void
append_int(char * filename, pid_t pid, size_t maxLength)
{
	size_t len = strlen(filename);

	uint32_t count = 0;
	pid_t value = pid;
	while (value > 0) {
		value /= 10;
		count++;
	}

	if (len + count >= maxLength) return;	// don't modify the string if it would violate maxLength

	filename[len + count] = '\0';

	value = pid;
	uint32_t i;
	for (i = 0 ; i < count ; i ++) {
		filename[len + count - 1 - i] = '0' + value % 10;
		value /= 10;
	}
}
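
// e.g. starting from "stack-logs." and pid 1234, append_int() leaves "stack-logs.1234" in the
// buffer, writing the digits in place, most significant first.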

/*
 * <rdar://problem/11128080> If we need to call confstr() during init, setting this
 * flag will postpone stack logging until after Libsystem's initialiser has run.
 */
static void
postpone_stack_logging(void)
{
	_malloc_printf(ASL_LEVEL_INFO, "stack logging postponed until after initialization.\n");
	stack_logging_postponed = 1;
}

/*
 * Check various temporary directory options, starting with _PATH_TMP and falling back to confstr().
 * Allocating and releasing the target buffer is the caller's responsibility.
 */
static bool
get_writeable_temp_dir(char* target)
{
	if (!target) return false;
	if (-1 != access(_PATH_TMP, W_OK)) {
		strlcpy(target, _PATH_TMP, (size_t)PATH_MAX);
		return true;
	}
	if (getenv("TMPDIR") && (-1 != access(getenv("TMPDIR"), W_OK))) {
		strlcpy(target, getenv("TMPDIR"), (size_t)PATH_MAX);
		return true;
	}
	if (stack_logging_finished_init) {
		size_t n = confstr(_CS_DARWIN_USER_TEMP_DIR, target, (size_t) PATH_MAX);
		if ((n > 0) && (n < PATH_MAX)) return true;
		n = confstr(_CS_DARWIN_USER_CACHE_DIR, target, (size_t) PATH_MAX);
		if ((n > 0) && (n < PATH_MAX)) return true;
	} else {
		/* <rdar://problem/11128080> Can't call confstr during init, so postpone
		   logging till after */
		postpone_stack_logging();
	}
	/* No writeable tmp directory found. Maybe we should try /private/var/tmp for the device case here ... */
	*target = '\0';
	return false;
}

/*
 * If successful, returns path to log file that was created, otherwise NULL.
 *
 * The log could be in one of 3 places (in decreasing order of preference)
 *
 * 1) value of environment variable MallocStackLoggingDirectory
 * 2) the temp directory /tmp for desktop apps and internal apps on devices, or
 * 3) the sandbox location + tmp/ in case of 3rd party apps on the device.
 *
 * For 1 and 3, we create a .link file with the path of the file. We prefer to
 * create this file in /tmp, but if we are unable to (device 3rd party case),
 * we create it in the same location as the .index file and issue a message
 * in syslog asking for it to be copied to /tmp to enable tools.
 *
 */
static char *
create_log_file(void)
{
	pid_t pid = getpid();
	const char *progname = getprogname();
	char *created_log_location = NULL;

	if (stack_log_path_buffers == NULL) {
		/*
		 * on first use, allocate buffers directly from the OS without
		 * using malloc
		 */

		stack_log_path_buffers = allocate_pages((uint64_t)round_page(3*PATH_MAX));
		if (stack_log_path_buffers == NULL) {
			_malloc_printf(ASL_LEVEL_INFO, "unable to allocate memory for path buffers\n");
			return NULL;
		}

		stack_log_location = &((char *)stack_log_path_buffers)[0*PATH_MAX];
		stack_log_reference_file = &((char *)stack_log_path_buffers)[1*PATH_MAX];
		__stack_log_file_path__ = &((char *)stack_log_path_buffers)[2*PATH_MAX];
	}

	// WARNING! use of snprintf can induce malloc() calls
	bool use_alternate_location = false;
	char *env_log_directory = getenv("MallocStackLoggingDirectory");
	size_t stack_log_len;
	if (env_log_directory && *env_log_directory) {
		use_alternate_location = true;
		strlcpy(stack_log_location, env_log_directory, (size_t)PATH_MAX);
	}
	if (!use_alternate_location || (access(stack_log_location, W_OK) == -1)) {
		if (!get_writeable_temp_dir(stack_log_location)) {
			if (!stack_logging_postponed) {
				_malloc_printf(ASL_LEVEL_INFO, "No writeable tmp dir\n");
			}
			return NULL;
		}
		if (0 != strcmp(stack_log_location, _PATH_TMP))
			use_alternate_location = true;
	}
	stack_log_len = strlen(stack_log_location);
	// add the '/' only if it's not already there.
	if (stack_log_location[stack_log_len-1] != '/') {
		strlcat(stack_log_location, "/", (size_t)PATH_MAX);
		++stack_log_len;
	}

	strlcpy(__stack_log_file_path__, stack_log_location, (size_t)PATH_MAX);

	strlcat(__stack_log_file_path__, stack_log_file_base_name, (size_t)PATH_MAX);
	append_int(__stack_log_file_path__, pid, (size_t)PATH_MAX);
	if (progname && progname[0] != '\0') {
		strlcat(__stack_log_file_path__, ".", (size_t)PATH_MAX);
		strlcat(__stack_log_file_path__, progname, (size_t)PATH_MAX);
	}
	// mkstemps() requires the XXXXXX template immediately before the suffix, so always append it.
	strlcat(__stack_log_file_path__, ".XXXXXX", (size_t)PATH_MAX);
	strlcat(__stack_log_file_path__, stack_log_file_suffix, (size_t)PATH_MAX);

	// Securely create the log file.
	if ((index_file_descriptor = mkstemps(__stack_log_file_path__, (int)strlen(stack_log_file_suffix))) != -1) {
		_malloc_printf(ASL_LEVEL_INFO, "stack logs being written into %s\n", __stack_log_file_path__);
		created_log_location = __stack_log_file_path__;
	} else {
		_malloc_printf(ASL_LEVEL_INFO, "unable to create stack logs at %s\n", stack_log_location);
		if (use_alternate_location) delete_logging_file(stack_log_reference_file);
		stack_log_reference_file[0] = '\0';
		stack_log_location[0] = '\0';
		__stack_log_file_path__[0] = '\0';
		created_log_location = NULL;
		return created_log_location;
	}

	// in the case where the user has specified an alternate location, drop a reference file
	// in /tmp with the suffix 'stack_log_link_suffix' (".link") and save the path of the
	// stack logging file there.
	bool use_alternate_link_location = false;
	if (use_alternate_location) {
		strlcpy(stack_log_reference_file, _PATH_TMP, (size_t)PATH_MAX);
		if (access(stack_log_reference_file, W_OK) == -1) {
			strlcpy(stack_log_reference_file, stack_log_location, (size_t)PATH_MAX);
			use_alternate_link_location = true;
		}
		strlcat(stack_log_reference_file, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(stack_log_reference_file, pid, (size_t)PATH_MAX);
		if (progname && progname[0] != '\0') {
			strlcat(stack_log_reference_file, ".", (size_t)PATH_MAX);
			strlcat(stack_log_reference_file, progname, (size_t)PATH_MAX);
		}
		// as above, mkstemps() needs the XXXXXX template right before the suffix; append it exactly once.
		strlcat(stack_log_reference_file, ".XXXXXX", (size_t)PATH_MAX);
		strlcat(stack_log_reference_file, stack_log_link_suffix, (size_t)PATH_MAX);

		int link_file_descriptor = mkstemps(stack_log_reference_file, (int)strlen(stack_log_link_suffix));
		if (link_file_descriptor == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "unable to create stack reference file %s at %s\n",
						   stack_log_reference_file, stack_log_location);
		} else {
			ssize_t written = write(link_file_descriptor, __stack_log_file_path__, strlen(__stack_log_file_path__));
			if (written < (ssize_t)strlen(__stack_log_file_path__)) {
				_malloc_printf(ASL_LEVEL_INFO, "unable to write to stack reference file %s at %s\n",
							   stack_log_reference_file, stack_log_location);
			} else {
				const char *description_string = "\n(This is a reference file to the stack logs at the path above.)\n";
				write(link_file_descriptor, description_string, strlen(description_string));
			}
			close(link_file_descriptor);
		}
	}
	if (use_alternate_link_location) {
		_malloc_printf(ASL_LEVEL_INFO, "Please issue: cp %s %s\n", stack_log_reference_file, _PATH_TMP);
	}
	return created_log_location;
}

// Check to see if the log file is actually a reference to another location
static int
log_file_is_reference(char *log_location, char *out_reference_loc_buffer, size_t max_reference_path_size)
{
	if (log_location == NULL || log_location[0] == '\0') return 0;

	size_t log_len = strlen(log_location);
	size_t link_suffix_len = strlen(stack_log_link_suffix);
	if (log_len < link_suffix_len || strncmp(log_location+log_len-link_suffix_len, stack_log_link_suffix, link_suffix_len) != 0) {
		// not a reference file.
		return 0;
	}

	if (!out_reference_loc_buffer || max_reference_path_size == 0) return 1;

	FILE *reference_file = fopen(log_location, "r");
	if (reference_file == NULL) {
		// if unable to open the file, it may be because another user created it; no need to warn.
		out_reference_loc_buffer[0] = '\0';
		return 1;
	}

	char *ret = fgets(out_reference_loc_buffer, (int)max_reference_path_size, reference_file);
	if (!ret) {
		out_reference_loc_buffer[0] = '\0';
		_malloc_printf(ASL_LEVEL_INFO, "unable to read from stack logging reference file at %s\n", log_location);
		fclose(reference_file);
		return 1;
	} else {
		size_t read_line_len = strlen(out_reference_loc_buffer);
		if (read_line_len >= 1 && out_reference_loc_buffer[read_line_len-1] == '\n') {
			out_reference_loc_buffer[read_line_len-1] = '\0';
		}
	}

	fclose(reference_file);

	return 1;
}

// This function may be called from the target process when exiting, or from either the target process or
// a stack log analysis process when reaping orphaned stack log files.
// Returns -1 if the files exist and they couldn't be removed, returns 0 otherwise.
static int
delete_logging_file(char *log_location)
{
	if (log_location == NULL || log_location[0] == '\0') return 0;

	struct stat statbuf;
	if (unlink(log_location) != 0 && stat(log_location, &statbuf) == 0) {
		return -1;
	}
	return 0;
}

// This function will be called from atexit() in the target process.
static void
delete_log_files(void)
{
	if (__stack_log_file_path__ && __stack_log_file_path__[0]) {
		if (delete_logging_file(__stack_log_file_path__) == 0) {
			_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", __stack_log_file_path__);
			__stack_log_file_path__[0] = '\0';
		} else {
			_malloc_printf(ASL_LEVEL_INFO, "unable to delete stack logs from %s\n", __stack_log_file_path__);
		}
	}
	if (stack_log_reference_file && stack_log_reference_file[0]) {
		delete_logging_file(stack_log_reference_file);
	}
}

static bool
is_process_running(pid_t pid)
{
	struct kinfo_proc kpt[1];
	size_t size = sizeof(struct kinfo_proc);
	int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};

	sysctl(mib, 4, kpt, &size, NULL, (size_t)0);	// size is either 1 or 0 entries when we ask for a single pid

	return (size==sizeof(struct kinfo_proc));
}

// The log files can be quite large and aren't too useful after the process that created them no longer exists.
// Normally they should get removed when the process exits, but if the process crashed the log files might remain.
// So, reap any stack log files for processes that no longer exist.
//
// If the remove_for_this_pid flag is set, then any log files that already exist for the current process will also be deleted.
// Those log files are probably the result of this process having been exec'ed from another one (without a fork()).
// The remove_for_this_pid flag is only set for a target process (one just starting logging); a stack logging "client"
// process reaps log files too, but if we're using stack logging on the client process itself, then we don't want to remove
// its own log files.
static void
reap_orphaned_log_files(bool remove_for_this_pid)
{
	DIR *dp;
	struct dirent *entry;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];
	pid_t current_pid = getpid();

	if ((dp = opendir(_PATH_TMP)) == NULL) {
		return;
	}

	strlcpy(prefix_name, stack_log_file_base_name, (size_t)PATH_MAX);
	size_t prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( entry->d_type != DT_DIR && entry->d_type != DT_LNK && ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) ) {
			long pid = strtol(&entry->d_name[prefix_length], (char **)NULL, 10);
			if ( (! is_process_running((pid_t)pid)) || (remove_for_this_pid && (pid_t)pid == current_pid) ) {
				strlcpy(pathname, _PATH_TMP, (size_t)PATH_MAX);
				strlcat(pathname, entry->d_name, (size_t)PATH_MAX);
				char reference_file_buffer[PATH_MAX];
				bool pathname_is_ref_file = false;
				if (log_file_is_reference(pathname, reference_file_buffer, (size_t)PATH_MAX) && *reference_file_buffer) {
					pathname_is_ref_file = true;
					if (delete_logging_file(reference_file_buffer) == 0) {
						if (remove_for_this_pid && pid == current_pid) {
							_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", reference_file_buffer);
						} else {
							_malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, reference_file_buffer);
						}
					}
				}
				if (delete_logging_file(pathname) == 0) {
					if (remove_for_this_pid && pid == current_pid) {
						if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", pathname);
					} else {
						if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, pathname);
					}
					char shmem_name_string[PATH_MAX];
					strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
					append_int(shmem_name_string, (pid_t)pid, (size_t)PATH_MAX);
					if (pid != current_pid) shm_unlink(shmem_name_string);
				}
			}
		}
	}
	closedir(dp);
}

/*
 * Since there are many errors that could cause stack logging to get disabled, this is a convenience method
 * for disabling any future logging in this process and for informing the user.
 */
static void
disable_stack_logging(void)
{
	_malloc_printf(ASL_LEVEL_INFO, "stack logging disabled due to previous errors.\n");
	stack_logging_enable_logging = 0;
	malloc_logger = NULL;
}

/* A wrapper around write() that will try to reopen the index/stack file and
 * write to it if someone closed it underneath us (e.g. the process we just
 * started decided to close all file descriptors except stdin/stdout/stderr). Some
 * programs like to do that and calling abort() on them is rude.
 */
static ssize_t
robust_write(int fd, const void *buf, size_t nbyte) {
	ssize_t written = write(fd, buf, nbyte);
	if (written == -1 && errno == EBADF) {
		char *file_to_reopen = NULL;
		int *fd_to_reset = NULL;

		// descriptor was closed on us. We need to reopen it
		if (fd == index_file_descriptor) {
			file_to_reopen = __stack_log_file_path__;
			fd_to_reset = &index_file_descriptor;
		} else {
			// We don't know about this file. Return (and abort()).
			_malloc_printf(ASL_LEVEL_INFO, "Unknown file descriptor; expecting stack logging index file\n");
			return -1;
		}

		// The file *should* already exist. If not, fail.
		fd = open(file_to_reopen, O_WRONLY | O_APPEND);
		if (fd < 3) {
			// If we somehow got stdin/out/err, we need to relinquish them and
			// get another fd.
			int fds_to_close[3] = { 0 };
			while (fd < 3) {
				if (fd == -1) {
					_malloc_printf(ASL_LEVEL_INFO, "unable to re-open stack logging file %s\n", file_to_reopen);
					delete_log_files();
					return -1;
				}
				fds_to_close[fd] = 1;
				fd = dup(fd);
			}

			// We have an fd we like. Close the ones we opened.
			if (fds_to_close[0]) close(0);
			if (fds_to_close[1]) close(1);
			if (fds_to_close[2]) close(2);
		}

		*fd_to_reset = fd;
		written = write(fd, buf, nbyte);
	}
	return written;
}

static void
flush_data(void)
{
	ssize_t written;	// signed size_t
	size_t remaining;
	char * p;

	if (index_file_descriptor == -1) {
		if (create_log_file() == NULL) {
			return;
		}
	}

	// Write the events before the index so that hopefully the events will be on disk if the index refers to them.
	p = pre_write_buffers->index_buffer;
	remaining = (size_t)pre_write_buffers->next_free_index_buffer_offset;
	while (remaining > 0) {
		written = robust_write(index_file_descriptor, p, remaining);
		if (written == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n",
						   __stack_log_file_path__, strerror(errno));
			disable_stack_logging();
			return;
		}
		p += written;
		remaining -= written;
	}

	pre_write_buffers->start_index_offset += pre_write_buffers->next_free_index_buffer_offset;
	pre_write_buffers->next_free_index_buffer_offset = 0;
}

static void
prepare_to_log_stacks(void)
{
	if (!pre_write_buffers) {
		last_logged_malloc_address = 0ul;
		logging_use_compaction = (stack_logging_dontcompact ? 0 : logging_use_compaction);

		// Create a shared memory region to hold the pre-write index and stack buffers. This will allow remote analysis processes to access
		// these buffers to get logs for even the most recent allocations. The remote process will need to pause this process to assure that
		// the contents of these buffers don't change while being inspected.
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, getpid(), (size_t)PATH_MAX);

		int shmid = shm_open(shmem_name_string, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
		if (shmid < 0) {
			// Failed to create shared memory region; turn off stack logging.
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		size_t full_shared_mem_size = sizeof(stack_buffer_shared_memory);
		ftruncate(shmid, (off_t)full_shared_mem_size);
		pre_write_buffers = (stack_buffer_shared_memory*)mmap(0, full_shared_mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
		close(shmid);

		if (MAP_FAILED == pre_write_buffers) {
			_malloc_printf(ASL_LEVEL_INFO, "error mapping in shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		// Store and use the buffer offsets in shared memory so that they can be accessed remotely
		pre_write_buffers->start_index_offset = 0ull;
		pre_write_buffers->next_free_index_buffer_offset = 0;

		// create the backtrace uniquing table
		pre_write_buffers->uniquing_table = __create_uniquing_table();
		pre_write_buffers->uniquing_table_address = (mach_vm_address_t)(uintptr_t)pre_write_buffers->uniquing_table;
		if (!pre_write_buffers->uniquing_table) {
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating stack uniquing table\n");
			disable_stack_logging();
			return;
		}

		uint64_t stack_buffer_sz = (uint64_t)round_page(sizeof(vm_address_t) * STACK_LOGGING_MAX_STACK_SIZE);
		stack_buffer = (vm_address_t*)allocate_pages(stack_buffer_sz);
		if (!stack_buffer) {
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating stack trace buffer\n");
			disable_stack_logging();
			return;
		}

		// malloc() can be called by the following, so these need to be done outside the stack_logging_lock but after the buffers have been set up.
		atexit(delete_log_files);		// atexit() can call malloc()
		reap_orphaned_log_files(true);		// this calls opendir() which calls malloc()

		// this call ensures that the log files exist; analyzing processes will rely on this assumption.
		if (create_log_file() == NULL) {
			/* postponement support requires cleaning up these structures now */
			__destroy_uniquing_table(pre_write_buffers->uniquing_table);
			deallocate_pages(stack_buffer, stack_buffer_sz);
			stack_buffer = NULL;

			munmap(pre_write_buffers, full_shared_mem_size);
			pre_write_buffers = NULL;

			if (!stack_logging_postponed) {
				disable_stack_logging();
			}
			return;
		}
	}
}

void
__disk_stack_logging_log_stack(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
{
	if (!stack_logging_enable_logging || stack_logging_postponed) return;

	// check incoming data
	if (type_flags & stack_logging_type_alloc && type_flags & stack_logging_type_dealloc) {
		uintptr_t swapper = size;
		size = ptr_arg;
		ptr_arg = swapper;
		if (ptr_arg == return_val) return;	// realloc had no effect, skipping

		if (ptr_arg == 0) {	// realloc(NULL, size) same as malloc(size)
			type_flags ^= stack_logging_type_dealloc;
		} else {
			// realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
			__disk_stack_logging_log_stack(stack_logging_type_dealloc, zone_ptr, ptr_arg, (uintptr_t)0, (uintptr_t)0, num_hot_to_skip + 1);
			__disk_stack_logging_log_stack(stack_logging_type_alloc, zone_ptr, size, (uintptr_t)0, return_val, num_hot_to_skip + 1);
			return;
		}
	}
	if (type_flags & stack_logging_type_dealloc) {
		if (size) {
			ptr_arg = size;
			size = 0;
		} else return;	// free(nil)
	}
	if (type_flags & stack_logging_type_alloc && return_val == 0) return;	// alloc that failed

	type_flags &= 0x7;

	// now actually begin
	prepare_to_log_stacks();

	// since there could have been a fatal (to stack logging) error such as the log files not being created, check this variable before continuing
	if (!stack_logging_enable_logging || stack_logging_postponed) return;

	vm_address_t self_thread = (vm_address_t)pthread_self();	// use pthread_self() rather than mach_thread_self() to avoid system call

	// lock and enter
	OSSpinLockLock(&stack_logging_lock);

	if (!stack_logging_enable_logging) {
		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// compaction
	if (last_logged_malloc_address && (type_flags & stack_logging_type_dealloc) && STACK_LOGGING_DISGUISE(ptr_arg) == last_logged_malloc_address) {
		// *waves hand* the last allocation never occurred
		pre_write_buffers->next_free_index_buffer_offset -= (uint32_t)sizeof(stack_logging_index_event);
		last_logged_malloc_address = 0ul;

		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// gather stack
	uint32_t count;
	thread_stack_pcs(stack_buffer, STACK_LOGGING_MAX_STACK_SIZE-1, &count);	// only gather up to STACK_LOGGING_MAX_STACK_SIZE-1 since we append thread id
	stack_buffer[count++] = self_thread + 1;	// stuffing thread # in the coldest slot. Add 1 to match what the old stack logging did.
	num_hot_to_skip += 2;
	if (count <= num_hot_to_skip) {
		// Oops! Didn't get a valid backtrace from thread_stack_pcs().
		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// unique stack in memory
	count -= num_hot_to_skip;
#if __LP64__
	mach_vm_address_t *frames = (mach_vm_address_t*)stack_buffer + num_hot_to_skip;
#else
	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
	uint32_t i;
	for (i = 0; i < count; i++) {
		frames[i] = stack_buffer[i+num_hot_to_skip];
	}
#endif

	uint64_t uniqueStackIdentifier = (uint64_t)(-1ll);
	while (!__enter_frames_in_table(pre_write_buffers->uniquing_table, &uniqueStackIdentifier, frames, (int32_t)count)) {
		__expand_uniquing_table(pre_write_buffers->uniquing_table);
	}

	stack_logging_index_event current_index;
	if (type_flags & stack_logging_type_alloc) {
		current_index.address = STACK_LOGGING_DISGUISE(return_val);
		current_index.argument = size;
		if (logging_use_compaction) {
			last_logged_malloc_address = current_index.address;	// disguised
		}
	} else {
		current_index.address = STACK_LOGGING_DISGUISE(ptr_arg);
		current_index.argument = 0ul;
		last_logged_malloc_address = 0ul;
	}
	current_index.offset_and_flags = STACK_LOGGING_OFFSET_AND_FLAGS(uniqueStackIdentifier, type_flags);

//	the following line is a good debugging tool for logging each allocation event as it happens.
//	malloc_printf("{0x%lx, %lld}\n", STACK_LOGGING_DISGUISE(current_index.address), uniqueStackIdentifier);

	// flush the data buffer to disk if necessary
	if (pre_write_buffers->next_free_index_buffer_offset + sizeof(stack_logging_index_event) >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
		flush_data();
	}

	// store bytes in buffers
	memcpy(pre_write_buffers->index_buffer+pre_write_buffers->next_free_index_buffer_offset, &current_index, sizeof(stack_logging_index_event));
	pre_write_buffers->next_free_index_buffer_offset += (uint32_t)sizeof(stack_logging_index_event);

	OSSpinLockUnlock(&stack_logging_lock);
}

void
__stack_logging_fork_prepare() {
	OSSpinLockLock(&stack_logging_lock);
}

void
__stack_logging_fork_parent() {
	OSSpinLockUnlock(&stack_logging_lock);
}

void
__stack_logging_fork_child() {
	malloc_logger = NULL;
	stack_logging_enable_logging = 0;
	OSSpinLockUnlock(&stack_logging_lock);
}

void
__stack_logging_early_finished() {
	stack_logging_finished_init = 1;
	stack_logging_postponed = 0;
}

boolean_t
__stack_logging_locked()
{
	bool acquired_lock = OSSpinLockTry(&stack_logging_lock);
	if (acquired_lock) OSSpinLockUnlock(&stack_logging_lock);
	return (acquired_lock ? false : true);
}

#pragma mark -
#pragma mark Remote Stack Log Access

#pragma mark - Design notes:

/*

this first one will look through the index, find the "stack_identifier" (i.e. the offset in the log file), and call the third function listed here.
extern kern_return_t __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *num_frames);
	// Gets the last allocation record about address

if !address, will load index and iterate through (expensive)
else will load just index, search for stack, and then use third function here to retrieve. (also expensive)
extern kern_return_t __mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context);
	// Applies enumerator to all records involving address, sending context as enumerator's second parameter; if !address, applies enumerator to all records

this function will load the stack file, look for the stack, and follow up to STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY references to reconstruct.
extern kern_return_t __mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count);
	// Given a uniqued_stack fills stack_frames_buffer

*/
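
// A sketch of how a client tool might use the first entry point above (illustrative only, not
// compiled in; "task" is a send right to the target task and "block_address" is a heap address in it):
#if 0
	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
	uint32_t num_frames = 0;
	kern_return_t kr = __mach_stack_logging_get_frames(task, block_address, frames, STACK_LOGGING_MAX_STACK_SIZE, &num_frames);
	if (kr == KERN_SUCCESS) {
		// frames[0..num_frames-1] now hold the PCs recorded at allocation time, hottest frame first.
	}
#endif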

#pragma mark - caching

__attribute__((always_inline)) static inline size_t
hash_index(uint64_t address, size_t max_pos) {
	return (size_t)((address >> 2) % (max_pos-1));	// simplicity rules.
}

__attribute__((always_inline)) static inline size_t
hash_multiplier(size_t capacity, uint32_t allowed_collisions) {
	return (capacity/(allowed_collisions*2+1));
}

__attribute__((always_inline)) static inline size_t
next_hash(size_t hash, size_t multiplier, size_t capacity, uint32_t collisions) {
	hash += multiplier * collisions;
	if (hash >= capacity) hash -= capacity;
	return hash;
}
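
// Worked example of the probing above (numbers are illustrative): with capacity 16384 and
// collision_allowance 17, hash_multiplier() gives 16384/35 = 468. Each retry k adds k*468 to the
// current position, so probes from an initial index h land at h, h+468, h+1404, h+2808, ...
// (mod capacity), spreading colliding entries instead of clustering them in adjacent slots.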

static void
transfer_node(remote_index_cache *cache, remote_index_node *old_node)
{
	uint32_t collisions = 0;
	size_t pos = hash_index(old_node->address, cache->cache_node_capacity);
	size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
	do {
		if (cache->table_memory[pos].address == old_node->address) {	// hit like this shouldn't happen.
			fprintf(stderr, "impossible collision! two address==address lists! (transfer_node)\n");
			break;
		} else if (cache->table_memory[pos].address == 0) {	// empty
			cache->table_memory[pos] = *old_node;
			break;
		} else {
			collisions++;
			pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);
		}
	} while (collisions <= cache->collision_allowance);

	if (collisions > cache->collision_allowance) {
		fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node)\n", sizeof(void*)*8);
	}
}

static void
expand_cache(remote_index_cache *cache)
{
	// keep old stats
	size_t old_node_capacity = cache->cache_node_capacity;
	remote_index_node *old_table = cache->table_memory;

	// quadruple the size (<< 2), raising the collision allowance by 3 to keep the fill target
	cache->cache_size <<= 2;
	cache->cache_node_capacity <<= 2;
	cache->collision_allowance += 3;
	cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));

	// repopulate (expensive!)
	size_t i;
	for (i = 0; i < old_node_capacity; i++) {
		if (old_table[i].address) {
			transfer_node(cache, &old_table[i]);
		}
	}
	free(old_table);
//	printf("cache expanded to %0.2f mb (eff: %3.0f%%, capacity: %lu, nodes: %llu, llnodes: %llu)\n", ((float)(cache->cache_size))/(1 << 20), ((float)(cache->cache_node_count)*100.0)/((float)(cache->cache_node_capacity)), cache->cache_node_capacity, cache->cache_node_count, cache->cache_llnode_count);
}

static void
insert_node(remote_index_cache *cache, uint64_t address, uint64_t index_file_offset)
{
	uint32_t collisions = 0;
	size_t pos = hash_index(address, cache->cache_node_capacity);
	size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);

	bool inserted = false;
	while (!inserted) {
		if (cache->table_memory[pos].address == 0ull || cache->table_memory[pos].address == address) {	// hit or empty
			cache->table_memory[pos].address = address;
			cache->table_memory[pos].index_file_offset = index_file_offset;
			inserted = true;
			break;
		}

		collisions++;
		pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);

		if (collisions > cache->collision_allowance) {
			expand_cache(cache);
			pos = hash_index(address, cache->cache_node_capacity);
			multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
			collisions = 0;
		}
	}
}

static void
update_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	remote_index_cache *cache = descriptors->cache;

	// create from scratch if necessary.
	if (!cache) {
		descriptors->cache = cache = (remote_index_cache*)calloc((size_t)1, sizeof(remote_index_cache));
		cache->cache_node_capacity = 1 << 14;
		cache->collision_allowance = 17;
		cache->last_index_file_offset = 0;
		cache->cache_size = cache->cache_node_capacity*sizeof(remote_index_node);
		cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));

		// now map in the shared memory, if possible
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, descriptors->remote_pid, (size_t)PATH_MAX);

		int shmid = shm_open(shmem_name_string, O_RDWR, S_IRUSR | S_IWUSR);
		if (shmid >= 0) {
			cache->shmem = mmap(0, sizeof(stack_buffer_shared_memory), PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
			close(shmid);
		}

		if (shmid < 0 || cache->shmem == MAP_FAILED) {
			// failed to connect to the shared memory region; warn and continue.
			_malloc_printf(ASL_LEVEL_INFO, "warning: unable to connect to remote process' shared memory; allocation histories may not be up-to-date.\n");
		}
	}

	// suspend and see how much updating there is to do. there are three scenarios, listed below
	bool update_snapshot = false;
	if (descriptors->remote_task != mach_task_self()) {
		task_suspend(descriptors->remote_task);
	}

	struct stat file_statistics;
	fstat(fileno(descriptors->index_file_stream), &file_statistics);
	size_t read_size = (descriptors->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	uint64_t read_this_update = 0;

	// Computing the number of new index records (delta_indecies) is subtle; there are three cases:
	// 1. there is no shared memory (or we can't connect); diff the last_index_file_offset from the filesize.
	// 2. the only updates have been in shared memory; the disk file didn't change at all. delta_indecies should be zero, scan snapshot only.
	// 3. the updates have flushed to disk, meaning that most likely there is new data on disk that wasn't read from shared memory;
	//    correct delta_indecies for the pre-scanned amount and read the new data from disk and shmem.
	uint64_t delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
	uint32_t last_snapshot_scan_index = 0;
	if (delta_indecies && cache->shmem) {
		// case 3: add cache scanned to known from disk and recalc
		cache->last_index_file_offset += cache->snapshot.next_free_index_buffer_offset;
		delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
		update_snapshot = true;
	} else if (cache->shmem) {
		// case 2: set the last snapshot scan count so we don't rescan something we've seen.
		last_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
	}

	// no update necessary for the file; check if we need a snapshot.
	if (delta_indecies == 0) {
		if (cache->shmem && !update_snapshot) {
			update_snapshot = (cache->shmem->next_free_index_buffer_offset != cache->snapshot.next_free_index_buffer_offset);
		}
	}

	// if a snapshot is necessary, memcpy from remote frozen process' memory
	// note: there were two ways to do this - spin lock or suspend. suspend allows us to
	// analyze processes even if they were artificially suspended. with a lock, there'd be
	// worry that the target was suspended with the lock taken.
	if (update_snapshot) {
		memcpy(&cache->snapshot, cache->shmem, sizeof(stack_buffer_shared_memory));
		// also need to update our version of the remote uniquing table
		vm_address_t local_uniquing_address = 0ul;
		mach_msg_type_number_t local_uniquing_size = 0;
		mach_vm_size_t desired_size = round_page(sizeof(backtrace_uniquing_table));
		kern_return_t err;
		if ((err = mach_vm_read(descriptors->remote_task, cache->shmem->uniquing_table_address, desired_size, &local_uniquing_address, &local_uniquing_size)) != KERN_SUCCESS
			|| local_uniquing_size != desired_size) {
			fprintf(stderr, "error while attempting to mach_vm_read remote stack uniquing table (%d): %s\n", err, mach_error_string(err));
		} else {
			// the mach_vm_read was successful, so acquire the uniquing table

			// need to re-read the table, so deallocate the current memory
			if (cache->uniquing_table.table) mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)(cache->uniquing_table.table), cache->uniquing_table.tableSize);

			// the following line gathers the uniquing table structure data, but the actual table memory is invalid since it's a pointer from the
			// remote process. this pointer will be mapped shared in a few lines.
			cache->uniquing_table = *((backtrace_uniquing_table*)local_uniquing_address);

			vm_address_t local_table_address = 0ul;
			mach_msg_type_number_t local_table_size = 0;

			err = mach_vm_read(descriptors->remote_task, cache->uniquing_table.table_address, cache->uniquing_table.tableSize, &local_table_address, &local_table_size);
			if (err == KERN_SUCCESS) cache->uniquing_table.table = (mach_vm_address_t*)local_table_address;
			else cache->uniquing_table.table = NULL;

			mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)local_uniquing_address, (mach_vm_size_t)local_uniquing_size);
		}
	}

	// resume
	if (descriptors->remote_task != mach_task_self()) {
		task_resume(descriptors->remote_task);
	}

	if (!update_snapshot && delta_indecies == 0) return;	// absolutely no updating needed.

	FILE *the_index = (descriptors->index_file_stream);

	// prepare for the read; target process could be 32 or 64 bit.

	stack_logging_index_event32 *target_32_index = NULL;
	stack_logging_index_event64 *target_64_index = NULL;

	// perform the update from the file
	uint32_t i;
	if (delta_indecies) {
		char bufferSpace[4096];	// 4 kb
		target_32_index = (stack_logging_index_event32*)bufferSpace;
		target_64_index = (stack_logging_index_event64*)bufferSpace;
		size_t number_slots = (size_t)(4096/read_size);

		size_t read_count = 0;
		if (fseeko(the_index, (off_t)(cache->last_index_file_offset), SEEK_SET)) {
			fprintf(stderr, "error while attempting to cache information from remote stack index file. (update_cache_for_file_streams)\n");
		}
		off_t current_index_position = cache->last_index_file_offset;
		do {
			number_slots = (size_t)MIN(delta_indecies - read_this_update, number_slots);
			read_count = fread(bufferSpace, read_size, number_slots, the_index);
			if (descriptors->task_is_64_bit) {
				for (i = 0; i < read_count; i++) {
					insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			} else {
				for (i = 0; i < read_count; i++) {
					insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			}
		} while (read_count);

		if (read_this_update < delta_indecies) {
			fprintf(stderr, "insufficient data in remote stack index file; expected more records.\n");
		}
		cache->last_index_file_offset += read_this_update * read_size;
	}

	if (update_snapshot) {
		target_32_index = (stack_logging_index_event32*)(cache->snapshot.index_buffer);
		target_64_index = (stack_logging_index_event64*)(cache->snapshot.index_buffer);

		uint32_t free_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
		off_t current_index_position = cache->snapshot.start_index_offset;
		if (descriptors->task_is_64_bit) {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		} else {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		}
	}
}

static void
destroy_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	if (descriptors->cache->shmem) {
		munmap(descriptors->cache->shmem, sizeof(stack_buffer_shared_memory));
	}
	free(descriptors->cache->table_memory);
	free(descriptors->cache);
	descriptors->cache = NULL;
}

#pragma mark - internal

// In the stack log analysis process, find the stack logging files for target process <pid>
// by scanning the temporary directory for directory entries with names of the form "stack-logs.<pid>."
// If we find such a directory then open the stack logging files in there.
// We might also have been passed the file path if the client first read it from the __stack_log_file_path__
// global variable in the target task, as will be needed if the .link cannot be put in /tmp.
static void
open_log_files(pid_t pid, char* file_path, remote_task_file_streams *this_task_streams)
{
	DIR *dp;
	struct dirent *entry;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];

	reap_orphaned_log_files(false);		// reap any left-over log files (for non-existent processes, but not for this analysis process)

	if (file_path != NULL) {
		this_task_streams->index_file_stream = fopen(file_path, "r");
		return;
	}

	if ((dp = opendir(_PATH_TMP)) == NULL) {
		return;
	}

	// It's OK to use snprintf in this routine since it should only be called by the clients
	// of stack logging, and thus calls to malloc are OK.
	snprintf(prefix_name, (size_t)PATH_MAX, "%s%d.", stack_log_file_base_name, pid);	// make sure to use "%s%d." rather than just "%s%d" to match the whole pid
	size_t prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) {
			snprintf(pathname, (size_t)PATH_MAX, "%s%s", _PATH_TMP, entry->d_name);
			char reference_file[PATH_MAX];
			if (log_file_is_reference(pathname, reference_file, (size_t)PATH_MAX)) {
				this_task_streams->index_file_stream = fopen(reference_file, "r");
			} else {
				this_task_streams->index_file_stream = fopen(pathname, "r");
			}

			break;
		}
	}
	closedir(dp);
}

static remote_task_file_streams*
retain_file_streams_for_task(task_t task, char* file_path)
{
	if (task == MACH_PORT_NULL) return NULL;

	OSSpinLockLock(&remote_fd_list_lock);

	// see if we already have streams open for this task
	uint32_t i = 0;
	for (i = 0; i < remote_task_fd_count; i++) {
		if (remote_fds[i].remote_task == task) {
			remote_fds[i].in_use_count++;
			OSSpinLockUnlock(&remote_fd_list_lock);
			return &remote_fds[i];
		}
	}

	// not already open; if the table is full, recycle the next slot that isn't in use
	uint32_t failures = 0;
	if (remote_task_fd_count == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
		while (remote_fds[next_remote_task_fd].in_use_count > 0) {
			next_remote_task_fd++;
			if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
			failures++;
			if (failures >= STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
				OSSpinLockUnlock(&remote_fd_list_lock);
				return NULL;
			}
		}
		fclose(remote_fds[next_remote_task_fd].index_file_stream);
		destroy_cache_for_file_streams(&remote_fds[next_remote_task_fd]);
	}

	pid_t pid;
	kern_return_t err = pid_for_task(task, &pid);
	if (err != KERN_SUCCESS) {
		OSSpinLockUnlock(&remote_fd_list_lock);
		return NULL;
	}

	remote_task_file_streams *this_task_streams = &remote_fds[next_remote_task_fd];

	open_log_files(pid, file_path, this_task_streams);

	// check whether the open failed
	if (this_task_streams->index_file_stream == NULL) {
		OSSpinLockUnlock(&remote_fd_list_lock);
		return NULL;
	}

	// check if target pid is running 64-bit
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
	struct kinfo_proc processInfo;
	size_t bufsize = sizeof(processInfo);
	if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, (size_t)0) == 0 && bufsize > 0) {
		this_task_streams->task_is_64_bit = processInfo.kp_proc.p_flag & P_LP64;
	} else {
		this_task_streams->task_is_64_bit = 0;
	}

	// otherwise set vars and go
	this_task_streams->in_use_count = 1;
	this_task_streams->remote_task = task;
	this_task_streams->remote_pid = pid;
	next_remote_task_fd++;
	if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
	remote_task_fd_count = MIN(remote_task_fd_count + 1, STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED);

	OSSpinLockUnlock(&remote_fd_list_lock);
	return this_task_streams;
}

static void
release_file_streams_for_task(task_t task)
{
	OSSpinLockLock(&remote_fd_list_lock);

	// decrement in-use count
	uint32_t i = 0;
	for (i = 0; i < remote_task_fd_count; i++) {
		if (remote_fds[i].remote_task == task) {
			remote_fds[i].in_use_count--;
			break;
		}
	}

	OSSpinLockUnlock(&remote_fd_list_lock);
}

#pragma mark - extern

//
// The following is used by client tools like malloc_history and Instruments to pass along the path
// of the index file, as read from the target task's __stack_log_file_path__ variable (set in this file).
// Eventually, at a suitable point, this additional argument should just be added to the other APIs below.
//
kern_return_t
__mach_stack_logging_set_file_path(task_t task, char* file_path)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task, file_path);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
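
/*
 * Illustrative call-order sketch, not compiled into this file: a tool that has
 * already read the contents of the target's __stack_log_file_path__ variable
 * into a buffer (here the hypothetical log_path) would register it before
 * asking for frames:
 *
 *	if (__mach_stack_logging_set_file_path(target_task, log_path) != KERN_SUCCESS) {
 *		// fall back to scanning /tmp, which happens automatically when the
 *		// APIs below are called without a prior path.
 *	}
 */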

kern_return_t
__mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task, NULL);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}

	update_cache_for_file_streams(remote_fd);

	uint32_t collisions = 0;
	size_t hash = hash_index(address, remote_fd->cache->cache_node_capacity);
	size_t multiplier = hash_multiplier(remote_fd->cache->cache_node_capacity, remote_fd->cache->collision_allowance);
	uint64_t located_file_position = 0;

	bool found = false;
	do {
		if (remote_fd->cache->table_memory[hash].address == address) { // hit!
			located_file_position = remote_fd->cache->table_memory[hash].index_file_offset;
			found = true;
			break;
		} else if (remote_fd->cache->table_memory[hash].address == 0ull) { // empty slot; the address isn't in the table
			break;
		}

		collisions++;
		hash = next_hash(hash, multiplier, remote_fd->cache->cache_node_capacity, collisions);

	} while (collisions <= remote_fd->cache->collision_allowance);

	if (found) {
		// prepare for the read; target process could be 32 or 64 bit.
		stack_logging_index_event32 *target_32_index = NULL;
		stack_logging_index_event64 *target_64_index = NULL;

		if (located_file_position >= remote_fd->cache->last_index_file_offset) {
			// must be in shared memory
			if (remote_fd->cache->shmem) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}

		} else {
			// it's written to disk
			char bufferSpace[128];

			size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
			fseeko(remote_fd->index_file_stream, (off_t)located_file_position, SEEK_SET);
			size_t read_count = fread(bufferSpace, read_size, (size_t)1, remote_fd->index_file_stream);
			if (read_count) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}
		}
	}

	release_file_streams_for_task(task);

	if (!found) {
		return KERN_FAILURE;
	}

	return __mach_stack_logging_frames_for_uniqued_stack(task, located_file_position, stack_frames_buffer, max_stack_frames, count);
}
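
/*
 * Illustrative usage sketch, not compiled into this file: roughly how an analysis
 * tool might print the recorded stack for a block in the target task. The function
 * name and output format are assumptions for the example, not APIs defined here.
 */
#if 0
static void
print_stack_for_address(task_t target_task, mach_vm_address_t block_address)
{
	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
	uint32_t frame_count = 0;

	if (__mach_stack_logging_get_frames(target_task, block_address, frames,
			STACK_LOGGING_MAX_STACK_SIZE, &frame_count) != KERN_SUCCESS) return;

	uint32_t i;
	for (i = 0; i < frame_count; i++) {
		printf("frame %u: 0x%llx\n", i, (uint64_t)frames[i]); // print each frame's return address
	}
}
#endif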


kern_return_t
__mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task, NULL);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}

	bool reading_all_addresses = (address == 0);
	mach_stack_logging_record_t pass_record;
	kern_return_t err = KERN_SUCCESS;

	// update (read index file once and only once)
	update_cache_for_file_streams(remote_fd);

	FILE *the_index = (remote_fd->index_file_stream);

	// prepare for the read; target process could be 32 or 64 bit.
	char bufferSpace[2048]; // 2 KB
	stack_logging_index_event32 *target_32_index = (stack_logging_index_event32*)bufferSpace;
	stack_logging_index_event64 *target_64_index = (stack_logging_index_event64*)bufferSpace;
	uint32_t target_addr_32 = (uint32_t)STACK_LOGGING_DISGUISE((uint32_t)address);
	uint64_t target_addr_64 = STACK_LOGGING_DISGUISE((uint64_t)address);
	size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	size_t number_slots = sizeof(bufferSpace)/read_size;
	uint64_t total_slots = remote_fd->cache->last_index_file_offset / read_size;

	// perform the search
	size_t read_count = 0;
	int64_t current_file_offset = 0;
	uint32_t i;
	do {
		// at this point, we need to read index events; read them from the file until it's necessary to grab them from the shared memory snapshot
		// and crop file reading to the point where we last scanned
		number_slots = (size_t)MIN(number_slots, total_slots);

		// if out of file to read (as of the time we entered this function), try to use shared memory snapshot
		if (number_slots == 0) {
			if (remote_fd->cache->shmem && remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset > (uint64_t)current_file_offset) {
				// use shared memory
				target_32_index = (stack_logging_index_event32*)remote_fd->cache->snapshot.index_buffer;
				target_64_index = (stack_logging_index_event64*)remote_fd->cache->snapshot.index_buffer;
				read_count = (size_t)(remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset - current_file_offset) / read_size;
				current_file_offset += read_count * read_size;
			} else {
				break;
			}
		} else {
			// get and save index (enumerator could modify)
			fseeko(the_index, current_file_offset, SEEK_SET);
			read_count = fread(bufferSpace, read_size, number_slots, the_index);
			current_file_offset = ftello(the_index);
			total_slots -= read_count;
		}

		if (remote_fd->task_is_64_bit) {
			for (i = 0; i < read_count; i++) {
				if (reading_all_addresses || target_64_index[i].address == target_addr_64) {
					pass_record.address = STACK_LOGGING_DISGUISE(target_64_index[i].address);
					pass_record.argument = target_64_index[i].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_64_index[i].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_64_index[i].offset_and_flags);
					enumerator(pass_record, context);
				}
			}
		} else {
			for (i = 0; i < read_count; i++) {
				if (reading_all_addresses || target_32_index[i].address == target_addr_32) {
					pass_record.address = STACK_LOGGING_DISGUISE(target_32_index[i].address);
					pass_record.argument = target_32_index[i].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_32_index[i].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_32_index[i].offset_and_flags);
					enumerator(pass_record, context);
				}
			}
		}
	} while (read_count);

	release_file_streams_for_task(task);
	return err;
}
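
/*
 * Illustrative sketch, not compiled into this file: a minimal enumerator
 * callback. Counting allocation events is an assumption made up for the
 * example; real clients such as malloc_history do more elaborate bookkeeping.
 */
#if 0
static void
count_allocations(mach_stack_logging_record_t record, void *context)
{
	// type_flags carries the stack_logging_type_* bits; argument carries
	// the requested size for allocation-type events.
	if (record.type_flags & stack_logging_type_alloc) {
		(*(uint64_t *)context)++;
	}
}

static uint64_t
count_allocation_events(task_t target_task)
{
	uint64_t total = 0;
	// Passing address == 0 enumerates every record, per reading_all_addresses above.
	__mach_stack_logging_enumerate_records(target_task, 0, count_allocations, &total);
	return total;
}
#endif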


kern_return_t
__mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task, NULL);
	if (remote_fd == NULL) return KERN_FAILURE;

	__unwind_stack_from_table_index(&remote_fd->cache->uniquing_table, stack_identifier, stack_frames_buffer, count, max_stack_frames);

	release_file_streams_for_task(task);

	if (*count) return KERN_SUCCESS;
	else return KERN_FAILURE;
}
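
/*
 * Illustrative sketch, not compiled into this file: resolving the uniqued stack
 * from an enumerated record back into individual frames, e.g. from inside an
 * enumerator callback like the one sketched above. The function name and buffer
 * size choice are assumptions for the example.
 */
#if 0
static void
print_frames_for_record(task_t target_task, mach_stack_logging_record_t record)
{
	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
	uint32_t frame_count = 0;
	if (__mach_stack_logging_frames_for_uniqued_stack(target_task, record.stack_identifier,
			frames, STACK_LOGGING_MAX_STACK_SIZE, &frame_count) == KERN_SUCCESS) {
		uint32_t i;
		for (i = 0; i < frame_count; i++) {
			printf("  0x%llx\n", (uint64_t)frames[i]);
		}
	}
}
#endif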


#ifdef TEST_DISK_STACK_LOGGING

// cc -o stack_logging_disk stack_logging_disk.c -DTEST_DISK_STACK_LOGGING

#include <sys/wait.h>

int
main()
{
	int status;
	int i;
	size_t total_globals = 0ul;

	fprintf(stderr, "master test process is %d\n", getpid());
	fprintf(stderr, "sizeof pre_write_buffers: %lu\n", sizeof(pre_write_buffers)); total_globals += sizeof(pre_write_buffers);
	fprintf(stderr, "sizeof stack_buffer: %lu\n", sizeof(stack_buffer)); total_globals += sizeof(stack_buffer);
	fprintf(stderr, "sizeof last_logged_malloc_address: %lu\n", sizeof(last_logged_malloc_address)); total_globals += sizeof(last_logged_malloc_address);
	fprintf(stderr, "sizeof stack_log_file_base_name: %lu\n", sizeof(stack_log_file_base_name)); total_globals += sizeof(stack_log_file_base_name);
	fprintf(stderr, "sizeof stack_log_file_suffix: %lu\n", sizeof(stack_log_file_suffix)); total_globals += sizeof(stack_log_file_suffix);
	fprintf(stderr, "sizeof stack_log_link_suffix: %lu\n", sizeof(stack_log_link_suffix)); total_globals += sizeof(stack_log_link_suffix);
	fprintf(stderr, "sizeof stack_log_location: %lu\n", (size_t)PATH_MAX); total_globals += (size_t)PATH_MAX;
	fprintf(stderr, "sizeof stack_log_reference_file: %lu\n", (size_t)PATH_MAX); total_globals += (size_t)PATH_MAX;
	fprintf(stderr, "sizeof __stack_log_file_path__ (index_file_path): %lu\n", (size_t)PATH_MAX); total_globals += (size_t)PATH_MAX;
	fprintf(stderr, "sizeof index_file_descriptor: %lu\n", sizeof(index_file_descriptor)); total_globals += sizeof(index_file_descriptor);
	fprintf(stderr, "sizeof remote_fds: %lu\n", sizeof(remote_fds)); total_globals += sizeof(remote_fds);
	fprintf(stderr, "sizeof next_remote_task_fd: %lu\n", sizeof(next_remote_task_fd)); total_globals += sizeof(next_remote_task_fd);
	fprintf(stderr, "sizeof remote_task_fd_count: %lu\n", sizeof(remote_task_fd_count)); total_globals += sizeof(remote_task_fd_count);
	fprintf(stderr, "sizeof remote_fd_list_lock: %lu\n", sizeof(remote_fd_list_lock)); total_globals += sizeof(remote_fd_list_lock);
	fprintf(stderr, "sizeof logging_use_compaction: %lu\n", sizeof(logging_use_compaction)); total_globals += sizeof(logging_use_compaction);

	fprintf(stderr, "size of all global data: %lu\n", total_globals);

	create_log_file();

	// create a few child processes and exit them cleanly so their logs get cleaned up
	fprintf(stderr, "\ncreating child processes and exiting cleanly\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_file();
			fprintf(stderr, "exiting child process %d\n", getpid());
			exit(1);
		}
		wait(&status);
	}

	// create a few child processes and abruptly _exit() them, leaving their logs around
	fprintf(stderr, "\ncreating child processes and exiting abruptly, leaving logs around\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_file();
			fprintf(stderr, "exiting child process %d\n", getpid());
			_exit(1);
		}
		wait(&status);
	}

	// this should reap any remaining logs
	fprintf(stderr, "\nexiting master test process %d\n", getpid());
	delete_log_files();
	return 0;
}

#endif