/*
 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/sysctl.h>

#include "stack_logging.h"
#include "malloc_printf.h"
#include "_simple.h"	// as included by malloc.c, this defines ASL_LEVEL_INFO
#ifdef TEST_DISK_STACK_LOGGING
#define _malloc_printf fprintf
#define ASL_LEVEL_INFO stderr
#endif
#define STACK_LOGGING_MAX_STACK_SIZE 512
#define STACK_LOGGING_BLOCK_WRITING_SIZE 8192
#define STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED 3

#define BACKTRACE_UNIQUING_DEBUG 0
// The expansion factor controls the shifting up of table size. A factor of 1 will double the size upon expanding,
// 2 will quadruple the size, etc. Maintaining a 66% fill in an ideal table requires the collision allowance to
// increase by 3 for every quadrupling of the table size (this is the constant c in the insertion
// performance, which is O(c*n)).
#define EXPAND_FACTOR 2
#define COLLISION_GROWTH_RATE 3
// For a uniquing table, the useful node count is slots := floor(table_byte_size / (2 * sizeof(mach_vm_address_t)))
// Some useful numbers for the initial max collision value (desiring 66% fill):
//    16K-23K slots -> 16 collisions
//    24K-31K slots -> 17 collisions
//    32K-47K slots -> 18 collisions
//    48K-79K slots -> 19 collisions
//    80K-96K slots -> 20 collisions
#define INITIAL_MAX_COLLIDE	19
#define DEFAULT_UNIQUING_PAGE_SIZE 256
#define STACK_LOGGING_FLAGS(longlongvar) (uint8_t)((uint64_t)(longlongvar) >> 56)
#define STACK_LOGGING_OFFSET(longlongvar) ((longlongvar) & 0x00FFFFFFFFFFFFFFull)
#define STACK_LOGGING_OFFSET_AND_FLAGS(longlongvar, realshortvar) (((uint64_t)(longlongvar) & 0x00FFFFFFFFFFFFFFull) | ((uint64_t)(realshortvar) << 56))
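
/*
 * A minimal sketch (not part of the original source) showing how the
 * offset/flags packing above round-trips: the 8-bit flags land in the top
 * byte of the 64-bit value and the offset occupies the low 56 bits.
 */
static void offset_and_flags_example(void)
{
	uint64_t packed = STACK_LOGGING_OFFSET_AND_FLAGS(0x123456789aull, 0x5a);
	malloc_printf("flags: 0x%x, offset: 0x%llx\n",
			STACK_LOGGING_FLAGS(packed),	// 0x5a, recovered from the top byte
			STACK_LOGGING_OFFSET(packed));	// 0x123456789a, the low 56 bits
}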

typedef struct {
	uintptr_t	argument;
	uintptr_t	address;
	uint64_t	offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event;

typedef struct {
	uint32_t	argument;
	uint32_t	address;
	uint64_t	offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event32;

typedef struct {
	uint64_t	argument;
	uint64_t	address;
	uint64_t	offset_and_flags;	// top 8 bits are actually the flags!
} stack_logging_index_event64;

typedef struct {
	uint64_t	numPages;	// number of pages of the table
	uint64_t	numNodes;
	uint64_t	tableSize;
	uint64_t	untouchableNodes;
	mach_vm_address_t	table_address;
	int32_t		max_collide;
	// 'table_address' is just an always 64-bit version of the pointer-sized 'table' field to remotely read;
	// it's important that the offset of 'table_address' in the struct does not change between 32 and 64-bit.
#if BACKTRACE_UNIQUING_DEBUG
	uint64_t	nodesFull;
	uint64_t	backtracesContained;
#endif
	mach_vm_address_t	*table;	// allocated using vm_allocate()
} backtrace_uniquing_table;

// for storing/looking up allocations that haven't yet been written to disk; consistent size across 32/64-bit processes.
// It's important that these fields don't change alignment due to the architecture because they may be accessed from an
// analyzing process with a different arch - hence the pragmas.
#pragma pack(push,4)
typedef struct {
	uint64_t	start_index_offset;
	uint32_t	next_free_index_buffer_offset;
	mach_vm_address_t	uniquing_table_address;
	char		index_buffer[STACK_LOGGING_BLOCK_WRITING_SIZE];
	backtrace_uniquing_table	*uniquing_table;
} stack_buffer_shared_memory;
#pragma pack(pop)
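
/*
 * A minimal sketch (not part of the original source) of how an analysis
 * process can attach to this shared region: shm_open() the name the target
 * process creates ("stack-logs.<pid>") and map one stack_buffer_shared_memory.
 * The production version of this logic is update_cache_for_file_streams() below.
 */
static void append_int(char * filename, pid_t pid, size_t maxLength);	// defined later in this file

static stack_buffer_shared_memory *
attach_to_target_shmem_example(pid_t target_pid)
{
	char name[PATH_MAX];
	strlcpy(name, "stack-logs.", (size_t)PATH_MAX);	// same prefix as stack_log_file_base_name below
	append_int(name, target_pid, (size_t)PATH_MAX);	// "stack-logs.<pid>"
	int shmid = shm_open(name, O_RDWR, S_IRUSR | S_IWUSR);
	if (shmid < 0) return NULL;
	void *mem = mmap(0, sizeof(stack_buffer_shared_memory), PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
	close(shmid);
	return (mem == MAP_FAILED) ? NULL : (stack_buffer_shared_memory *)mem;
}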

// target process address -> record table (for __mach_stack_logging_get_frames)
typedef struct {
	uint64_t	address;
	uint64_t	index_file_offset;
} remote_index_node;

// for caching index information client-side:
typedef struct {
	size_t		cache_size;
	size_t		cache_node_capacity;
	uint32_t	collision_allowance;
	remote_index_node	*table_memory;	// this can be malloced; it's on the client side.
	stack_buffer_shared_memory	*shmem;	// shared memory
	stack_buffer_shared_memory	snapshot;	// memory snapshot of the remote process' shared memory
	uint32_t	last_pre_written_index_size;
	uint64_t	last_index_file_offset;
	backtrace_uniquing_table	uniquing_table;	// snapshot of the remote process' uniquing table
} remote_index_cache;

// for reading stack history information from remote processes:
typedef struct {
	task_t		remote_task;
	pid_t		remote_pid;
	int32_t		task_is_64_bit;
	int32_t		in_use_count;
	FILE		*index_file_stream;
	remote_index_cache	*cache;
} remote_task_file_streams;

#pragma mark Constants/Globals

static OSSpinLock stack_logging_lock = OS_SPINLOCK_INIT;

// support for multi-threaded forks
extern void __stack_logging_fork_prepare();
extern void __stack_logging_fork_parent();
extern void __stack_logging_fork_child();
extern void __stack_logging_early_finished();

// support for gdb and others checking for stack_logging locks
__private_extern__ boolean_t __stack_logging_locked();

// single-thread access variables
static stack_buffer_shared_memory *pre_write_buffers;
static vm_address_t *stack_buffer;
static uintptr_t last_logged_malloc_address = 0;

// Constants to define stack logging file path names.
// Files will get written as /tmp/stack-logs.<pid>.<progname>.XXXXXX.index
// unless the base directory is specified otherwise with MallocStackLoggingDirectory.
// In this case, a file /tmp/stack-logs.<pid>.<progname>.XXXXXX.link will also be created.
static const char *stack_log_file_base_name = "stack-logs.";
static const char *stack_log_file_suffix = ".index";
static const char *stack_log_link_suffix = ".link";

static void *stack_log_path_buffers = NULL;
static char *stack_log_location = NULL;
static char *stack_log_reference_file = NULL;
char *__stack_log_file_path__ = NULL;
static int index_file_descriptor = -1;

// for accessing remote log files
static remote_task_file_streams remote_fds[STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED];
static uint32_t next_remote_task_fd = 0;
static uint32_t remote_task_fd_count = 0;
static OSSpinLock remote_fd_list_lock = OS_SPINLOCK_INIT;

// activation variables
static int logging_use_compaction = 1;	// set this to zero to always disable compaction.

// We set malloc_logger to NULL to disable logging if we encounter errors
// during file writing.
typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
extern malloc_logger_t *malloc_logger;

#pragma mark In-Memory Backtrace Uniquing

static __attribute__((always_inline)) inline void*
allocate_pages(uint64_t memSize)
{
	mach_vm_address_t allocatedMem = 0ull;
	if (mach_vm_allocate(mach_task_self(), &allocatedMem, memSize, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL)) != KERN_SUCCESS) {
		malloc_printf("allocate_pages(): virtual memory exhausted!\n");
	}
	return (void*)(uintptr_t)allocatedMem;
}

static __attribute__((always_inline)) inline kern_return_t
deallocate_pages(void* memPointer, uint64_t memSize)
{
	return mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)memPointer, memSize);
}

static backtrace_uniquing_table*
__create_uniquing_table(void)
{
	backtrace_uniquing_table *uniquing_table = (backtrace_uniquing_table*)allocate_pages((uint64_t)round_page(sizeof(backtrace_uniquing_table)));
	if (!uniquing_table) return NULL;
	bzero(uniquing_table, sizeof(backtrace_uniquing_table));
	uniquing_table->numPages = DEFAULT_UNIQUING_PAGE_SIZE;
	uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
	uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1;	// make sure it's even.
	uniquing_table->table = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);
	uniquing_table->table_address = (uintptr_t)uniquing_table->table;
	uniquing_table->max_collide = INITIAL_MAX_COLLIDE;
	uniquing_table->untouchableNodes = 0;

#if BACKTRACE_UNIQUING_DEBUG
	malloc_printf("create_uniquing_table(): creating. size: %lldKB == %lldMB, numnodes: %lld (%lld untouchable)\n", uniquing_table->tableSize >> 10, uniquing_table->tableSize >> 20, uniquing_table->numNodes, uniquing_table->untouchableNodes);
	malloc_printf("create_uniquing_table(): table: %p; end: %p\n", uniquing_table->table, (void*)((uintptr_t)uniquing_table->table + (uintptr_t)uniquing_table->tableSize));
#endif
	return uniquing_table;
}

static void
__destroy_uniquing_table(backtrace_uniquing_table* table)
{
	deallocate_pages(table->table, table->tableSize);
	deallocate_pages(table, sizeof(backtrace_uniquing_table));
}

static void
__expand_uniquing_table(backtrace_uniquing_table *uniquing_table)
{
	mach_vm_address_t *oldTable = uniquing_table->table;
	uint64_t oldsize = uniquing_table->tableSize;
	uint64_t oldnumnodes = uniquing_table->numNodes;

	uniquing_table->numPages = uniquing_table->numPages << EXPAND_FACTOR;
	uniquing_table->tableSize = uniquing_table->numPages * vm_page_size;
	uniquing_table->numNodes = ((uniquing_table->tableSize / (sizeof(mach_vm_address_t) * 2)) >> 1) << 1;	// make sure it's even.
	mach_vm_address_t *newTable = (mach_vm_address_t*)(uintptr_t)allocate_pages(uniquing_table->tableSize);

	uniquing_table->table = newTable;
	uniquing_table->table_address = (uintptr_t)uniquing_table->table;
	uniquing_table->max_collide = uniquing_table->max_collide + COLLISION_GROWTH_RATE;

	if (mach_vm_copy(mach_task_self(), (mach_vm_address_t)(uintptr_t)oldTable, oldsize, (mach_vm_address_t)(uintptr_t)newTable) != KERN_SUCCESS) {
		malloc_printf("expandUniquingTable(): VMCopyFailed\n");
	}
	uniquing_table->untouchableNodes = oldnumnodes;

#if BACKTRACE_UNIQUING_DEBUG
	malloc_printf("expandUniquingTable(): expanded from nodes full: %lld of: %lld (~%2d%%); to nodes: %lld (inactive = %lld); unique bts: %lld\n",
			uniquing_table->nodesFull, oldnumnodes, (int)(((uniquing_table->nodesFull * 100.0) / (double)oldnumnodes) + 0.5),
			uniquing_table->numNodes, uniquing_table->untouchableNodes, uniquing_table->backtracesContained);
	malloc_printf("expandUniquingTable(): allocate: %p; end: %p\n", newTable, (void*)((uintptr_t)newTable + (uintptr_t)(uniquing_table->tableSize)));
	malloc_printf("expandUniquingTable(): deallocate: %p; end: %p\n", oldTable, (void*)((uintptr_t)oldTable + (uintptr_t)oldsize));
#endif

	if (deallocate_pages(oldTable, oldsize) != KERN_SUCCESS) {
		malloc_printf("expandUniquingTable(): mach_vm_deallocate failed. [%p]\n", uniquing_table->table);
	}
}

static int
__enter_frames_in_table(backtrace_uniquing_table *uniquing_table, uint64_t *foundIndex, mach_vm_address_t *frames, int32_t count)
{
	// The hash values need to be the same size as the addresses (because we use the value -1); for clarity, define a new type
	typedef mach_vm_address_t hash_index_t;

	mach_vm_address_t thisPC;
	hash_index_t hash, uParent = (hash_index_t)(-1ll), modulus = (uniquing_table->numNodes-uniquing_table->untouchableNodes-1);
	int32_t collisions, lcopy = count, returnVal = 1;
	hash_index_t hash_multiplier = ((uniquing_table->numNodes - uniquing_table->untouchableNodes)/(uniquing_table->max_collide*2+1));
	mach_vm_address_t *node;
	while (--lcopy >= 0) {
		thisPC = frames[lcopy];

		// hash = initialHash(uniquing_table, uParent, thisPC);
		hash = uniquing_table->untouchableNodes + (((uParent << 4) ^ (thisPC >> 2)) % modulus);
		collisions = uniquing_table->max_collide;

		while (collisions--) {
			node = uniquing_table->table + (hash * 2);

			if (*node == 0 && node[1] == 0) {
				// blank; store this entry!
				// Note that we need to test for both head[0] and head[1] as (0, -1) is a valid entry
				node[0] = thisPC;
				node[1] = uParent;
				uParent = hash;
#if BACKTRACE_UNIQUING_DEBUG
				uniquing_table->nodesFull++;
				if (lcopy == 0) {
					uniquing_table->backtracesContained++;
				}
#endif
				break;
			}
			if (*node == thisPC && node[1] == uParent) {
				// hit! retrieve index and go.
				uParent = hash;
				break;
			}

			hash += collisions * hash_multiplier + 1;
			if (hash >= uniquing_table->numNodes) {
				hash -= (uniquing_table->numNodes - uniquing_table->untouchableNodes);	// wrap around.
			}
		}

		if (collisions < 0) {
			returnVal = 0;
			break;
		}
	}

	if (returnVal) *foundIndex = uParent;

	return returnVal;
}

static void
__unwind_stack_from_table_index(backtrace_uniquing_table *uniquing_table, uint64_t index_pos, mach_vm_address_t *out_frames_buffer, uint32_t *out_frames_count, uint32_t max_frames)
{
	mach_vm_address_t *node = uniquing_table->table + (index_pos * 2);
	uint32_t foundFrames = 0;
	if (index_pos < uniquing_table->numNodes) {
		while (foundFrames < max_frames) {
			out_frames_buffer[foundFrames++] = node[0];
			if (node[1] == (mach_vm_address_t)(-1ll)) break;
			node = uniquing_table->table + (node[1] * 2);
		}
	}

	*out_frames_count = foundFrames;
}
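
/*
 * A minimal round-trip sketch (not part of the original source): unique a
 * small fake backtrace, then read it back. The PC values are arbitrary.
 */
static void uniquing_round_trip_example(void)
{
	backtrace_uniquing_table *table = __create_uniquing_table();
	mach_vm_address_t frames[3] = {0x1000, 0x2000, 0x3000};	// fake PCs, hottest first
	uint64_t stack_id = (uint64_t)(-1ll);
	while (!__enter_frames_in_table(table, &stack_id, frames, 3)) {
		__expand_uniquing_table(table);	// out of collision budget: grow, then retry
	}
	mach_vm_address_t out[8];
	uint32_t out_count = 0;
	__unwind_stack_from_table_index(table, stack_id, out, &out_count, 8);
	// out now holds {0x1000, 0x2000, 0x3000} and out_count == 3
	__destroy_uniquing_table(table);
}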

#pragma mark Disk Stack Logging

static void delete_log_files(void);	// pre-declare
static int delete_logging_file(char *log_location);

static void
append_int(char * filename, pid_t pid, size_t maxLength)
{
	size_t len = strlen(filename);

	uint32_t count = 0;
	pid_t value = pid;
	while (value > 0) {
		value /= 10;
		count++;
	}

	if (len + count >= maxLength) return;	// don't modify the string if it would violate maxLength

	filename[len + count] = '\0';

	uint32_t i;
	value = pid;
	for (i = 0 ; i < count ; i ++) {
		filename[len + count - 1 - i] = '0' + value % 10;
		value /= 10;
	}
}
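
/*
 * Usage sketch (not part of the original source): append_int() appends the
 * decimal digits of a pid in place, avoiding snprintf() (which can call malloc).
 */
static void append_int_example(void)
{
	char name[PATH_MAX] = "stack-logs.";
	append_int(name, (pid_t)1234, (size_t)PATH_MAX);	// name becomes "stack-logs.1234"
}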

/*
 * <rdar://problem/11128080> If we needed to call confstr during init then setting this
 * flag will postpone stack logging until after Libsystem's initialiser has run.
 */
static void
postpone_stack_logging(void)
{
	_malloc_printf(ASL_LEVEL_INFO, "stack logging postponed until after initialization.\n");
	stack_logging_postponed = 1;
}

/*
 * Check various temporary directory options starting with _PATH_TMP and use confstr.
 * Allocating and releasing the target buffer is the caller's responsibility.
 */
static bool
get_writeable_temp_dir(char* target)
{
	if (!target) return false;
	if (-1 != access(_PATH_TMP, W_OK)) {
		strlcpy(target, _PATH_TMP, (size_t)PATH_MAX);
		return true;
	}
	if (getenv("TMPDIR") && (-1 != access(getenv("TMPDIR"), W_OK))) {
		strlcpy(target, getenv("TMPDIR"), (size_t)PATH_MAX);
		return true;
	}
	if (stack_logging_finished_init) {
		size_t n = confstr(_CS_DARWIN_USER_TEMP_DIR, target, (size_t) PATH_MAX);
		if ((n > 0) && (n < PATH_MAX)) return true;
		n = confstr(_CS_DARWIN_USER_CACHE_DIR, target, (size_t) PATH_MAX);
		if ((n > 0) && (n < PATH_MAX)) return true;
	} else {
		/* <rdar://problem/11128080> Can't call confstr during init, so postpone
		   logging till after */
		postpone_stack_logging();
	}
	/* No writeable tmp directory found. Maybe should try /private/var/tmp for device here ... */
	return false;
}

/*
 * If successful, returns the path to the log file that was created, otherwise NULL.
 *
 * The log could be in one of 3 places (in decreasing order of preference):
 *
 * 1) the value of the environment variable MallocStackLoggingDirectory
 * 2) the temp directory /tmp for desktop apps and internal apps on devices, or
 * 3) the sandbox location + tmp/ in the case of 3rd party apps on the device.
 *
 * For 1 and 3, we create a .link file with the path of the file. We prefer to
 * create this file in /tmp, but if we are unable to (device 3rd party case),
 * we create it in the same location as the .index file and issue a message
 * in syslog asking for it to be copied to /tmp to enable tools.
 */
static char *
create_log_file(void)
{
	pid_t pid = getpid();
	const char *progname = getprogname();
	char *created_log_location = NULL;

	if (stack_log_path_buffers == NULL) {
		/*
		 * on first use, allocate buffers directly from the OS without
		 * using malloc
		 */
		stack_log_path_buffers = allocate_pages((uint64_t)round_page(3*PATH_MAX));
		if (stack_log_path_buffers == NULL) {
			_malloc_printf(ASL_LEVEL_INFO, "unable to allocate memory for path buffers\n");
			return NULL;
		}

		stack_log_location = &((char *)stack_log_path_buffers)[0*PATH_MAX];
		stack_log_reference_file = &((char *)stack_log_path_buffers)[1*PATH_MAX];
		__stack_log_file_path__ = &((char *)stack_log_path_buffers)[2*PATH_MAX];
	}

	// WARNING! use of snprintf can induce malloc() calls
	bool use_alternate_location = false;
	char *evn_log_directory = getenv("MallocStackLoggingDirectory");
	size_t stack_log_len;
	if (evn_log_directory && *evn_log_directory) {
		use_alternate_location = true;
		strlcpy(stack_log_location, evn_log_directory, (size_t)PATH_MAX);
	}
	if (!use_alternate_location || (access(stack_log_location, W_OK) == -1)) {
		if (!get_writeable_temp_dir(stack_log_location)) {
			if (!stack_logging_postponed) {
				_malloc_printf(ASL_LEVEL_INFO, "No writeable tmp dir\n");
			}
			return NULL;
		}
		if (0 != strcmp(stack_log_location, _PATH_TMP))
			use_alternate_location = true;
	}
	stack_log_len = strlen(stack_log_location);
	// add the '/' only if it's not already there.
	if (stack_log_location[stack_log_len-1] != '/') {
		strlcat(stack_log_location, "/", (size_t)PATH_MAX);
	}

	strlcpy(__stack_log_file_path__, stack_log_location, (size_t)PATH_MAX);

	strlcat(__stack_log_file_path__, stack_log_file_base_name, (size_t)PATH_MAX);
	append_int(__stack_log_file_path__, pid, (size_t)PATH_MAX);
	if (progname && progname[0] != '\0') {
		strlcat(__stack_log_file_path__, ".", (size_t)PATH_MAX);
		strlcat(__stack_log_file_path__, progname, (size_t)PATH_MAX);
	}
	if (!use_alternate_location) strlcat(__stack_log_file_path__, ".XXXXXX", (size_t)PATH_MAX);
	strlcat(__stack_log_file_path__, stack_log_file_suffix, (size_t)PATH_MAX);

	// Securely create the log file.
	if ((index_file_descriptor = mkstemps(__stack_log_file_path__, (int)strlen(stack_log_file_suffix))) != -1) {
		_malloc_printf(ASL_LEVEL_INFO, "stack logs being written into %s\n", __stack_log_file_path__);
		created_log_location = __stack_log_file_path__;
	} else {
		_malloc_printf(ASL_LEVEL_INFO, "unable to create stack logs at %s\n", stack_log_location);
		if (use_alternate_location) delete_logging_file(stack_log_reference_file);
		stack_log_reference_file[0] = '\0';
		stack_log_location[0] = '\0';
		__stack_log_file_path__[0] = '\0';
		created_log_location = NULL;
		return created_log_location;
	}

	// in the case where the user has specified an alternate location, drop a reference file
	// in /tmp with the suffix 'stack_log_link_suffix' (".link") and save the path of the
	// stack logging file there.
	bool use_alternate_link_location = false;
	if (use_alternate_location) {
		strlcpy(stack_log_reference_file, _PATH_TMP, (size_t)PATH_MAX);
		if (access(stack_log_reference_file, W_OK) == -1) {
			strlcpy(stack_log_reference_file, stack_log_location, (size_t)PATH_MAX);
			use_alternate_link_location = true;
		}
		strlcat(stack_log_reference_file, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(stack_log_reference_file, pid, (size_t)PATH_MAX);
		if (progname && progname[0] != '\0') {
			strlcat(stack_log_reference_file, ".", (size_t)PATH_MAX);
			strlcat(stack_log_reference_file, progname, (size_t)PATH_MAX);
		}
		if (!use_alternate_link_location)
			strlcat(stack_log_reference_file, ".XXXXXX", (size_t)PATH_MAX);
		strlcat(stack_log_reference_file, stack_log_link_suffix, (size_t)PATH_MAX);

		int link_file_descriptor = mkstemps(stack_log_reference_file, (int)strlen(stack_log_link_suffix));
		if (link_file_descriptor == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "unable to create stack reference file %s at %s\n",
					stack_log_reference_file, stack_log_location);
		} else {
			ssize_t written = write(link_file_descriptor, __stack_log_file_path__, strlen(__stack_log_file_path__));
			if (written < (ssize_t)strlen(__stack_log_file_path__)) {
				_malloc_printf(ASL_LEVEL_INFO, "unable to write to stack reference file %s at %s\n",
						stack_log_reference_file, stack_log_location);
			} else {
				const char *description_string = "\n(This is a reference file to the stack logs at the path above.)\n";
				write(link_file_descriptor, description_string, strlen(description_string));
			}
			close(link_file_descriptor);
		}

		if (use_alternate_link_location) {
			_malloc_printf(ASL_LEVEL_INFO, "Please issue: cp %s %s\n", stack_log_reference_file, _PATH_TMP);
		}
	}
	return created_log_location;
}

// Check to see if the log file is actually a reference to another location
static int
log_file_is_reference(char *log_location, char *out_reference_loc_buffer, size_t max_reference_path_size)
{
	if (log_location == NULL || log_location[0] == '\0') return 0;

	size_t log_len = strlen(log_location);
	size_t link_suffix_len = strlen(stack_log_link_suffix);
	if (log_len < link_suffix_len || strncmp(log_location+log_len-link_suffix_len, stack_log_link_suffix, link_suffix_len) != 0) {
		// not a reference file.
		return 0;
	}

	if (!out_reference_loc_buffer || max_reference_path_size == 0) return 1;

	FILE *reference_file = fopen(log_location, "r");
	if (reference_file == NULL) {
		// if unable to open the file, it may be because another user created it; no need to warn.
		out_reference_loc_buffer[0] = '\0';
		return 1;
	}

	char *ret = fgets(out_reference_loc_buffer, (int)max_reference_path_size, reference_file);
	if (!ret) {
		out_reference_loc_buffer[0] = '\0';
		_malloc_printf(ASL_LEVEL_INFO, "unable to read from stack logging reference file at %s\n", log_location);
	} else {
		size_t read_line_len = strlen(out_reference_loc_buffer);
		if (read_line_len >= 1 && out_reference_loc_buffer[read_line_len-1] == '\n') {
			out_reference_loc_buffer[read_line_len-1] = '\0';
		}
	}

	fclose(reference_file);

	return 1;
}

// This function may be called from the target process when exiting, or from either the target process or
// a stack log analysis process when reaping orphaned stack log files.
// Returns -1 if the files exist and they couldn't be removed, returns 0 otherwise.
static int
delete_logging_file(char *log_location)
{
	if (log_location == NULL || log_location[0] == '\0') return 0;

	struct stat statbuf;
	if (unlink(log_location) != 0 && stat(log_location, &statbuf) == 0) {
		return -1;
	}
	return 0;
}

// This function will be called from atexit() in the target process.
static void
delete_log_files(void)
{
	if (__stack_log_file_path__ && __stack_log_file_path__[0]) {
		if (delete_logging_file(__stack_log_file_path__) == 0) {
			_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", __stack_log_file_path__);
			__stack_log_file_path__[0] = '\0';
		} else {
			_malloc_printf(ASL_LEVEL_INFO, "unable to delete stack logs from %s\n", __stack_log_file_path__);
		}
	}
	if (stack_log_reference_file && stack_log_reference_file[0]) {
		delete_logging_file(stack_log_reference_file);
	}
}

static bool
is_process_running(pid_t pid)
{
	struct kinfo_proc kpt[1];
	size_t size = sizeof(struct kinfo_proc);
	int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};

	sysctl(mib, 4, kpt, &size, NULL, (size_t)0);	// size is either 1 or 0 entries when we ask for a single pid

	return (size == sizeof(struct kinfo_proc));
}

// The log files can be quite large and aren't too useful after the process that created them no longer exists.
// Normally they should get removed when the process exits, but if the process crashed the log files might remain.
// So, reap any stack log files for processes that no longer exist.
//
// If the remove_for_this_pid flag is set, then any log files that already exist for the current process will also be deleted.
// Those log files are probably the result of this process having been exec'ed from another one (without a fork()).
// The remove_for_this_pid flag is only set for a target process (one just starting logging); a stack logging "client"
// process reaps log files too, but if we're using stack logging on the client process itself, then we don't want to remove
// its own log files.
static void
reap_orphaned_log_files(bool remove_for_this_pid)
{
	DIR *dp;
	struct dirent *entry;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];
	pid_t current_pid = getpid();

	if ((dp = opendir(_PATH_TMP)) == NULL) {
		return;
	}

	strlcpy(prefix_name, stack_log_file_base_name, (size_t)PATH_MAX);
	size_t prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( entry->d_type != DT_DIR && entry->d_type != DT_LNK && ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) ) {
			long pid = strtol(&entry->d_name[prefix_length], (char **)NULL, 10);
			if ( (! is_process_running((pid_t)pid)) || (remove_for_this_pid && (pid_t)pid == current_pid) ) {
				strlcpy(pathname, _PATH_TMP, (size_t)PATH_MAX);
				strlcat(pathname, entry->d_name, (size_t)PATH_MAX);
				char reference_file_buffer[PATH_MAX];
				bool pathname_is_ref_file = false;
				if (log_file_is_reference(pathname, reference_file_buffer, (size_t)PATH_MAX) && *reference_file_buffer) {
					pathname_is_ref_file = true;
					if (delete_logging_file(reference_file_buffer) == 0) {
						if (remove_for_this_pid && pid == current_pid) {
							_malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", reference_file_buffer);
						} else {
							_malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, reference_file_buffer);
						}
					}
				}
				if (delete_logging_file(pathname) == 0) {
					if (remove_for_this_pid && pid == current_pid) {
						if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "stack logs deleted from %s\n", pathname);
					} else {
						if (!pathname_is_ref_file) _malloc_printf(ASL_LEVEL_INFO, "process %ld no longer exists, stack logs deleted from %s\n", pid, pathname);
					}
					char shmem_name_string[PATH_MAX];
					strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
					append_int(shmem_name_string, (pid_t)pid, (size_t)PATH_MAX);
					if (pid != current_pid) shm_unlink(shmem_name_string);
				}
			}
		}
	}
	closedir(dp);
}

/*
 * Since there are many errors that could cause stack logging to get disabled, this is a convenience method
 * for disabling any future logging in this process and for informing the user.
 */
static void
disable_stack_logging(void)
{
	_malloc_printf(ASL_LEVEL_INFO, "stack logging disabled due to previous errors.\n");
	stack_logging_enable_logging = 0;
	malloc_logger = NULL;
}

/* A wrapper around write() that will try to reopen the index/stack file and
 * write to it if someone closed it underneath us (e.g. the process we just
 * started decided to close all file descriptors except stdin/out/err). Some
 * programs like to do that and calling abort() on them is rude.
 */
static ssize_t
robust_write(int fd, const void *buf, size_t nbyte) {
	ssize_t written = write(fd, buf, nbyte);
	if (written == -1 && errno == EBADF) {
		char *file_to_reopen = NULL;
		int *fd_to_reset = NULL;

		// descriptor was closed on us. We need to reopen it
		if (fd == index_file_descriptor) {
			file_to_reopen = __stack_log_file_path__;
			fd_to_reset = &index_file_descriptor;
		} else {
			// We don't know about this file. Return (and abort()).
			_malloc_printf(ASL_LEVEL_INFO, "Unknown file descriptor; expecting stack logging index file\n");
			return -1;
		}

		// The file *should* already exist. If not, fail.
		fd = open(file_to_reopen, O_WRONLY | O_APPEND);
		if (fd < 3) {
			// If we somehow got stdin/out/err, we need to relinquish them and
			// get another file descriptor outside that range.
			int fds_to_close[3] = { 0 };
			while (fd < 3) {
				if (fd == -1) {
					_malloc_printf(ASL_LEVEL_INFO, "unable to re-open stack logging file %s\n", file_to_reopen);
					return -1;
				}
				fds_to_close[fd] = 1;
				fd = dup(fd);
			}

			// We have an fd we like. Close the ones we opened.
			if (fds_to_close[0]) close(0);
			if (fds_to_close[1]) close(1);
			if (fds_to_close[2]) close(2);
		}

		*fd_to_reset = fd;
		written = write(fd, buf, nbyte);
	}
	return written;
}

static void
flush_data_in_buffer_to_disk(void)
{
	char *p;
	size_t remaining;
	ssize_t written;	// signed size_t

	// if the log file doesn't exist yet, create it now.
	if (index_file_descriptor == -1) {
		if (create_log_file() == NULL) {
			return;
		}
	}

	// Write the events before the index so that hopefully the events will be on disk if the index refers to them.
	p = pre_write_buffers->index_buffer;
	remaining = (size_t)pre_write_buffers->next_free_index_buffer_offset;
	while (remaining > 0) {
		written = robust_write(index_file_descriptor, p, remaining);
		if (written == -1) {
			_malloc_printf(ASL_LEVEL_INFO, "Unable to write to stack logging file %s (%s)\n",
					__stack_log_file_path__, strerror(errno));
			disable_stack_logging();
			return;
		}
		p += written;
		remaining -= written;
	}

	pre_write_buffers->start_index_offset += pre_write_buffers->next_free_index_buffer_offset;
	pre_write_buffers->next_free_index_buffer_offset = 0;
}

static void
prepare_to_log_stacks(void)
{
	if (!pre_write_buffers) {
		last_logged_malloc_address = 0ul;
		logging_use_compaction = (stack_logging_dontcompact ? 0 : logging_use_compaction);

		// Create a shared memory region to hold the pre-write index and stack buffers. This will allow remote analysis processes to access
		// these buffers to get logs for even the most recent allocations. The remote process will need to pause this process to assure that
		// the contents of these buffers don't change while being inspected.
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, getpid(), (size_t)PATH_MAX);

		int shmid = shm_open(shmem_name_string, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
		if (shmid < 0) {
			// Failed to create shared memory region; turn off stack logging.
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		size_t full_shared_mem_size = sizeof(stack_buffer_shared_memory);
		ftruncate(shmid, (off_t)full_shared_mem_size);
		pre_write_buffers = (stack_buffer_shared_memory*)mmap(0, full_shared_mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
		close(shmid);

		if (MAP_FAILED == pre_write_buffers) {
			_malloc_printf(ASL_LEVEL_INFO, "error mapping in shared memory for disk-based stack logging output buffers\n");
			disable_stack_logging();
			return;
		}

		// Store and use the buffer offsets in shared memory so that they can be accessed remotely
		pre_write_buffers->start_index_offset = 0ull;
		pre_write_buffers->next_free_index_buffer_offset = 0;

		// create the backtrace uniquing table
		pre_write_buffers->uniquing_table = __create_uniquing_table();
		pre_write_buffers->uniquing_table_address = (mach_vm_address_t)(uintptr_t)pre_write_buffers->uniquing_table;
		if (!pre_write_buffers->uniquing_table) {
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating stack uniquing table\n");
			disable_stack_logging();
			return;
		}

		uint64_t stack_buffer_sz = (uint64_t)round_page(sizeof(vm_address_t) * STACK_LOGGING_MAX_STACK_SIZE);
		stack_buffer = (vm_address_t*)allocate_pages(stack_buffer_sz);
		if (!stack_buffer) {
			_malloc_printf(ASL_LEVEL_INFO, "error while allocating stack trace buffer\n");
			disable_stack_logging();
			return;
		}

		// malloc() can be called by the following, so these need to be done outside the stack_logging_lock but after the buffers have been set up.
		atexit(delete_log_files);	// atexit() can call malloc()
		reap_orphaned_log_files(true);	// this calls opendir() which calls malloc()

		// this call ensures that the log files exist; analyzing processes will rely on this assumption.
		if (create_log_file() == NULL) {
			/* postponement support requires cleaning up these structures now */
			__destroy_uniquing_table(pre_write_buffers->uniquing_table);
			deallocate_pages(stack_buffer, stack_buffer_sz);
			stack_buffer = NULL;

			munmap(pre_write_buffers, full_shared_mem_size);
			pre_write_buffers = NULL;

			if (!stack_logging_postponed) {
				disable_stack_logging();
			}
			return;
		}
	}
}

void
__disk_stack_logging_log_stack(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
{
	if (!stack_logging_enable_logging || stack_logging_postponed) return;

	// check incoming data
	if (type_flags & stack_logging_type_alloc && type_flags & stack_logging_type_dealloc) {
		uintptr_t swapper = size;
		size = ptr_arg;
		ptr_arg = swapper;
		if (ptr_arg == return_val) return;	// realloc had no effect, skipping

		if (ptr_arg == 0) {	// realloc(NULL, size) is the same as malloc(size)
			type_flags ^= stack_logging_type_dealloc;
		} else {
			// realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
			__disk_stack_logging_log_stack(stack_logging_type_dealloc, zone_ptr, ptr_arg, (uintptr_t)0, (uintptr_t)0, num_hot_to_skip + 1);
			__disk_stack_logging_log_stack(stack_logging_type_alloc, zone_ptr, size, (uintptr_t)0, return_val, num_hot_to_skip + 1);
			return;
		}
	}
	if (type_flags & stack_logging_type_dealloc) {
		if (ptr_arg) {
			size = ptr_arg;
			ptr_arg = 0;
		} else return;	// free(nil)
	}
	if (type_flags & stack_logging_type_alloc && return_val == 0) return;	// alloc that failed

	// now actually begin
	prepare_to_log_stacks();

	// since there could have been a fatal (to stack logging) error such as the log files not being created, check this variable before continuing
	if (!stack_logging_enable_logging || stack_logging_postponed) return;
	vm_address_t self_thread = (vm_address_t)pthread_self();	// use pthread_self() rather than mach_thread_self() to avoid system call

	// lock and enter
	OSSpinLockLock(&stack_logging_lock);

	if (!stack_logging_enable_logging) {
		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// compaction
	if (last_logged_malloc_address && (type_flags & stack_logging_type_dealloc) && STACK_LOGGING_DISGUISE(ptr_arg) == last_logged_malloc_address) {
		// *waves hand* the last allocation never occurred
		pre_write_buffers->next_free_index_buffer_offset -= (uint32_t)sizeof(stack_logging_index_event);
		last_logged_malloc_address = 0ul;

		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// gather stack
	uint32_t count;
	thread_stack_pcs(stack_buffer, STACK_LOGGING_MAX_STACK_SIZE-1, &count);	// only gather up to STACK_LOGGING_MAX_STACK_SIZE-1 since we append thread id
	stack_buffer[count++] = self_thread + 1;	// stuffing thread # in the coldest slot. Add 1 to match what the old stack logging did.
	num_hot_to_skip += 2;
	if (count <= num_hot_to_skip) {
		// Oops! Didn't get a valid backtrace from thread_stack_pcs().
		OSSpinLockUnlock(&stack_logging_lock);
		return;
	}

	// unique stack in memory
	count -= num_hot_to_skip;
#if __LP64__
	mach_vm_address_t *frames = (mach_vm_address_t*)stack_buffer + num_hot_to_skip;
#else
	mach_vm_address_t frames[STACK_LOGGING_MAX_STACK_SIZE];
	uint32_t i;
	for (i = 0; i < count; i++) {
		frames[i] = stack_buffer[i+num_hot_to_skip];
	}
#endif

	uint64_t uniqueStackIdentifier = (uint64_t)(-1ll);
	while (!__enter_frames_in_table(pre_write_buffers->uniquing_table, &uniqueStackIdentifier, frames, (int32_t)count)) {
		__expand_uniquing_table(pre_write_buffers->uniquing_table);
	}

	stack_logging_index_event current_index;
	if (type_flags & stack_logging_type_alloc) {
		current_index.address = STACK_LOGGING_DISGUISE(return_val);
		current_index.argument = size;
		if (logging_use_compaction) {
			last_logged_malloc_address = current_index.address;	// disguised
		}
	} else {
		current_index.address = STACK_LOGGING_DISGUISE(ptr_arg);
		current_index.argument = 0ul;
		last_logged_malloc_address = 0ul;
	}
	current_index.offset_and_flags = STACK_LOGGING_OFFSET_AND_FLAGS(uniqueStackIdentifier, type_flags);

//	the following line is a good debugging tool for logging each allocation event as it happens.
//	malloc_printf("{0x%lx, %lld}\n", STACK_LOGGING_DISGUISE(current_index.address), uniqueStackIdentifier);

	// flush the data buffer to disk if necessary
	if (pre_write_buffers->next_free_index_buffer_offset + sizeof(stack_logging_index_event) >= STACK_LOGGING_BLOCK_WRITING_SIZE) {
		flush_data_in_buffer_to_disk();
	}

	// store bytes in buffers
	memcpy(pre_write_buffers->index_buffer+pre_write_buffers->next_free_index_buffer_offset, &current_index, sizeof(stack_logging_index_event));
	pre_write_buffers->next_free_index_buffer_offset += (uint32_t)sizeof(stack_logging_index_event);

	OSSpinLockUnlock(&stack_logging_lock);
}
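
/*
 * Hypothetical sketch (not part of the original source) of how an allocation
 * event reaches the logger above. In the real libmalloc it is malloc itself
 * that invokes the malloc_logger hook; the type constants come from
 * stack_logging.h.
 */
static void log_one_allocation_example(void)
{
	void *p = malloc(32);
	// an alloc event: no zone pointer, size 32, no pointer argument,
	// the new pointer as the result, no extra hot frames to skip.
	__disk_stack_logging_log_stack(stack_logging_type_alloc, (uintptr_t)0,
			(uintptr_t)32, (uintptr_t)0, (uintptr_t)p, 0);
}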

void
__stack_logging_fork_prepare() {
	OSSpinLockLock(&stack_logging_lock);
}

void
__stack_logging_fork_parent() {
	OSSpinLockUnlock(&stack_logging_lock);
}

void
__stack_logging_fork_child() {
	malloc_logger = NULL;
	stack_logging_enable_logging = 0;
	OSSpinLockUnlock(&stack_logging_lock);
}

void
__stack_logging_early_finished() {
	stack_logging_finished_init = 1;
	stack_logging_postponed = 0;
}

boolean_t
__stack_logging_locked()
{
	bool acquired_lock = OSSpinLockTry(&stack_logging_lock);
	if (acquired_lock) OSSpinLockUnlock(&stack_logging_lock);
	return (acquired_lock ? false : true);
}

#pragma mark Remote Stack Log Access

#pragma mark - Design notes:

/*

this first one will look through the index, find the "stack_identifier" (i.e. the offset in the log file), and call the third function listed here.
extern kern_return_t __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *num_frames);
	// Gets the last allocation record about address

if !address, will load index and iterate through (expensive)
else will load just index, search for stack, and then use third function here to retrieve. (also expensive)
extern kern_return_t __mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context);
	// Applies enumerator to all records involving address sending context as enumerator's second parameter; if !address, applies enumerator to all records

this function will load the stack file, look for the stack, and follow up to STACK_LOGGING_FORCE_FULL_BACKTRACE_EVERY references to reconstruct.
extern kern_return_t __mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count);
	// Given a uniqued_stack fills stack_frames_buffer

*/
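
/*
 * Hypothetical usage sketch (not part of the original source): enumerate
 * every record logged by a target task. Assumes 'task' came from
 * task_for_pid() and that the target has stack logging enabled.
 */
static void print_record_example(mach_stack_logging_record_t record, void *context)
{
	(void)context;
	printf("type=%u address=0x%llx argument=%llu stack_id=%llu\n",
			record.type_flags, record.address, record.argument, record.stack_identifier);
}

static void dump_all_records_example(task_t task)
{
	// passing address == 0 asks for all records (see the notes above)
	kern_return_t err = __mach_stack_logging_enumerate_records(task, (mach_vm_address_t)0, print_record_example, NULL);
	if (err != KERN_SUCCESS) fprintf(stderr, "enumeration failed: %s\n", mach_error_string(err));
}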

#pragma mark - caching

__attribute__((always_inline)) static inline size_t
hash_index(uint64_t address, size_t max_pos) {
	return (size_t)((address >> 2) % (max_pos-1));	// simplicity rules.
}

__attribute__((always_inline)) static inline size_t
hash_multiplier(size_t capacity, uint32_t allowed_collisions) {
	return (capacity/(allowed_collisions*2+1));
}

__attribute__((always_inline)) static inline size_t
next_hash(size_t hash, size_t multiplier, size_t capacity, uint32_t collisions) {
	hash += multiplier * collisions;
	if (hash >= capacity) hash -= capacity;
	return hash;
}
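
/*
 * A minimal sketch (not part of the original source) of the open-addressing
 * probe sequence these helpers produce; the capacity and allowance values
 * mirror the initial cache settings used below.
 */
static void probe_sequence_example(void)
{
	size_t capacity = 1 << 14;	// initial cache_node_capacity
	uint32_t allowance = 17;	// initial collision_allowance
	size_t multiplier = hash_multiplier(capacity, allowance);
	size_t pos = hash_index(0xdeadbeefull, capacity);
	uint32_t collisions = 0;
	while (collisions <= allowance) {
		// a real lookup would examine table slot 'pos' here and
		// stop on a hit or an empty slot
		collisions++;
		pos = next_hash(pos, multiplier, capacity, collisions);
	}
}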

static void
transfer_node(remote_index_cache *cache, remote_index_node *old_node)
{
	uint32_t collisions = 0;
	size_t pos = hash_index(old_node->address, cache->cache_node_capacity);
	size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
	do {
		if (cache->table_memory[pos].address == old_node->address) {	// a hit like this shouldn't happen.
			fprintf(stderr, "impossible collision! two address==address lists! (transfer_node)\n");
			break;
		} else if (cache->table_memory[pos].address == 0) {	// empty
			cache->table_memory[pos] = *old_node;
			break;
		} else {
			collisions++;
			pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);
		}
	} while (collisions <= cache->collision_allowance);

	if (collisions > cache->collision_allowance) {
		fprintf(stderr, "reporting bad hash function! disk stack logging reader %lu bit. (transfer_node)\n", sizeof(void*)*8);
	}
}

static void
expand_cache(remote_index_cache *cache)
{
	// keep old stats
	size_t old_node_capacity = cache->cache_node_capacity;
	remote_index_node *old_table = cache->table_memory;

	// quadruple the size
	cache->cache_size <<= 2;
	cache->cache_node_capacity <<= 2;
	cache->collision_allowance += 3;
	cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));

	// repopulate (expensive!)
	size_t i;
	for (i = 0; i < old_node_capacity; i++) {
		if (old_table[i].address) {
			transfer_node(cache, &old_table[i]);
		}
	}

	free(old_table);
//	printf("cache expanded to %0.2f mb (eff: %3.0f%%, capacity: %lu, nodes: %llu, llnodes: %llu)\n", ((float)(cache->cache_size))/(1 << 20), ((float)(cache->cache_node_count)*100.0)/((float)(cache->cache_node_capacity)), cache->cache_node_capacity, cache->cache_node_count, cache->cache_llnode_count);
}

static void
insert_node(remote_index_cache *cache, uint64_t address, uint64_t index_file_offset)
{
	uint32_t collisions = 0;
	size_t pos = hash_index(address, cache->cache_node_capacity);
	size_t multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);

	bool inserted = false;
	while (!inserted) {
		if (cache->table_memory[pos].address == 0ull || cache->table_memory[pos].address == address) {	// hit or empty
			cache->table_memory[pos].address = address;
			cache->table_memory[pos].index_file_offset = index_file_offset;
			inserted = true;
			break;
		}

		collisions++;
		pos = next_hash(pos, multiplier, cache->cache_node_capacity, collisions);

		if (collisions > cache->collision_allowance) {
			expand_cache(cache);
			pos = hash_index(address, cache->cache_node_capacity);
			multiplier = hash_multiplier(cache->cache_node_capacity, cache->collision_allowance);
			collisions = 0;
		}
	}
}

static void
update_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	remote_index_cache *cache = descriptors->cache;

	// create from scratch if necessary.
	if (!cache) {
		descriptors->cache = cache = (remote_index_cache*)calloc((size_t)1, sizeof(remote_index_cache));
		cache->cache_node_capacity = 1 << 14;
		cache->collision_allowance = 17;
		cache->last_index_file_offset = 0;
		cache->cache_size = cache->cache_node_capacity*sizeof(remote_index_node);
		cache->table_memory = (void*)calloc(cache->cache_node_capacity, sizeof(remote_index_node));

		// now map in the shared memory, if possible
		char shmem_name_string[PATH_MAX];
		strlcpy(shmem_name_string, stack_log_file_base_name, (size_t)PATH_MAX);
		append_int(shmem_name_string, descriptors->remote_pid, (size_t)PATH_MAX);

		int shmid = shm_open(shmem_name_string, O_RDWR, S_IRUSR | S_IWUSR);
		if (shmid >= 0) {
			cache->shmem = mmap(0, sizeof(stack_buffer_shared_memory), PROT_READ | PROT_WRITE, MAP_SHARED, shmid, (off_t)0);
			close(shmid);
		}

		if (shmid < 0 || cache->shmem == MAP_FAILED) {
			// failed to connect to the shared memory region; warn and continue.
			_malloc_printf(ASL_LEVEL_INFO, "warning: unable to connect to remote process' shared memory; allocation histories may not be up-to-date.\n");
		}
	}

	// suspend and see how much updating there is to do. there are three scenarios, listed below
	bool update_snapshot = false;
	if (descriptors->remote_task != mach_task_self()) {
		task_suspend(descriptors->remote_task);
	}

	struct stat file_statistics;
	fstat(fileno(descriptors->index_file_stream), &file_statistics);
	size_t read_size = (descriptors->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	uint64_t read_this_update = 0;

	// the index delta is subtle; there are three cases:
	// 1. there is no shared memory (or we can't connect); diff the last_index_file_offset from the filesize.
	// 2. the only updates have been in shared memory; the disk file didn't change at all. delta_indecies should be zero, scan snapshot only.
	// 3. the updates have flushed to disk, meaning that most likely there is new data on disk that wasn't read from shared memory.
	//    correct delta_indecies for the pre-scanned amount and read the new data from disk and shmem.
	uint64_t delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
	uint32_t last_snapshot_scan_index = 0;
	if (delta_indecies && cache->shmem) {
		// case 3: add the amount already scanned from the snapshot to the known disk offset and recalculate
		cache->last_index_file_offset += cache->snapshot.next_free_index_buffer_offset;
		delta_indecies = (file_statistics.st_size - cache->last_index_file_offset) / read_size;
		update_snapshot = true;
	} else if (cache->shmem) {
		// case 2: set the last snapshot scan count so we don't rescan something we've seen.
		last_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
	}

	// no update necessary for the file; check if we need a snapshot.
	if (delta_indecies == 0) {
		if (cache->shmem && !update_snapshot) {
			update_snapshot = (cache->shmem->next_free_index_buffer_offset != cache->snapshot.next_free_index_buffer_offset);
		}
	}

	// if a snapshot is necessary, memcpy from remote frozen process' memory
	// note: there were two ways to do this - spin lock or suspend. suspend allows us to
	// analyze processes even if they were artificially suspended. with a lock, there'd be
	// worry that the target was suspended with the lock taken.
	if (update_snapshot) {
		memcpy(&cache->snapshot, cache->shmem, sizeof(stack_buffer_shared_memory));
		// also need to update our version of the remote uniquing table
		vm_address_t local_uniquing_address = 0ul;
		mach_msg_type_number_t local_uniquing_size = 0;
		mach_vm_size_t desired_size = round_page(sizeof(backtrace_uniquing_table));
		kern_return_t err;
		if ((err = mach_vm_read(descriptors->remote_task, cache->shmem->uniquing_table_address, desired_size, &local_uniquing_address, &local_uniquing_size)) != KERN_SUCCESS
			|| local_uniquing_size != desired_size) {
			fprintf(stderr, "error while attempting to mach_vm_read remote stack uniquing table (%d): %s\n", err, mach_error_string(err));
		} else {
			// the mach_vm_read was successful, so acquire the uniquing table

			// need to re-read the table, so deallocate the current memory
			if (cache->uniquing_table.table) mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)(cache->uniquing_table.table), cache->uniquing_table.tableSize);

			// the following line gathers the uniquing table structure data, but the actual table memory is invalid since it's a pointer from the
			// remote process. this pointer will be mapped shared in a few lines.
			cache->uniquing_table = *((backtrace_uniquing_table*)local_uniquing_address);

			vm_address_t local_table_address = 0ul;
			mach_msg_type_number_t local_table_size = 0;

			err = mach_vm_read(descriptors->remote_task, cache->uniquing_table.table_address, cache->uniquing_table.tableSize, &local_table_address, &local_table_size);
			if (err == KERN_SUCCESS) cache->uniquing_table.table = (mach_vm_address_t*)local_table_address;
			else cache->uniquing_table.table = NULL;

			mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)local_uniquing_address, (mach_vm_size_t)local_uniquing_size);
		}
	}

	// resume the target
	if (descriptors->remote_task != mach_task_self()) {
		task_resume(descriptors->remote_task);
	}

	if (!update_snapshot && delta_indecies == 0) return;	// absolutely no updating needed.

	FILE *the_index = (descriptors->index_file_stream);

	// prepare for the read; target process could be 32 or 64 bit.
	stack_logging_index_event32 *target_32_index = NULL;
	stack_logging_index_event64 *target_64_index = NULL;

	// perform the update from the file
	uint32_t i;
	if (delta_indecies) {
		char bufferSpace[4096];	// 4 kb
		target_32_index = (stack_logging_index_event32*)bufferSpace;
		target_64_index = (stack_logging_index_event64*)bufferSpace;
		size_t number_slots = (size_t)(4096/read_size);

		size_t read_count = 0;
		if (fseeko(the_index, (off_t)(cache->last_index_file_offset), SEEK_SET)) {
			fprintf(stderr, "error while attempting to cache information from remote stack index file. (update_cache_for_file_streams)\n");
		}
		off_t current_index_position = cache->last_index_file_offset;
		do {
			number_slots = (size_t)MIN(delta_indecies - read_this_update, number_slots);
			read_count = fread(bufferSpace, read_size, number_slots, the_index);
			if (descriptors->task_is_64_bit) {
				for (i = 0; i < read_count; i++) {
					insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			} else {
				for (i = 0; i < read_count; i++) {
					insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)current_index_position);
					read_this_update++;
					current_index_position += read_size;
				}
			}
		} while (read_count);

		if (read_this_update < delta_indecies) {
			fprintf(stderr, "insufficient data in remote stack index file; expected more records.\n");
		}
		cache->last_index_file_offset += read_this_update * read_size;
	}

	if (update_snapshot) {
		target_32_index = (stack_logging_index_event32*)(cache->snapshot.index_buffer);
		target_64_index = (stack_logging_index_event64*)(cache->snapshot.index_buffer);

		uint32_t free_snapshot_scan_index = cache->snapshot.next_free_index_buffer_offset / (uint32_t)read_size;
		off_t current_index_position = cache->snapshot.start_index_offset;
		if (descriptors->task_is_64_bit) {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node(cache, STACK_LOGGING_DISGUISE(target_64_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		} else {
			for (i = last_snapshot_scan_index; i < free_snapshot_scan_index; i++) {
				insert_node(cache, (mach_vm_address_t)STACK_LOGGING_DISGUISE(target_32_index[i].address), (uint64_t)(current_index_position + (i * read_size)));
			}
		}
	}
}

static void
destroy_cache_for_file_streams(remote_task_file_streams *descriptors)
{
	if (descriptors->cache->shmem) {
		munmap(descriptors->cache->shmem, sizeof(stack_buffer_shared_memory));
	}
	free(descriptors->cache->table_memory);
	free(descriptors->cache);
	descriptors->cache = NULL;
}

#pragma mark - internal

// In the stack log analysis process, find the stack logging files for target process <pid>
// by scanning the temporary directory for directory entries with names of the form "stack-logs.<pid>."
// If we find such a directory then open the stack logging files in there.
// We might also have been passed the file path if the client first read it from the __stack_log_file_path__
// global variable in the target task, as will be needed if the .link cannot be put in /tmp.
static void
open_log_files(pid_t pid, char* file_path, remote_task_file_streams *this_task_streams)
{
	DIR *dp;
	struct dirent *entry;
	char prefix_name[PATH_MAX];
	char pathname[PATH_MAX];

	reap_orphaned_log_files(false);	// reap any left-over log files (for non-existent processes, but not for this analysis process)

	if (file_path != NULL) {
		this_task_streams->index_file_stream = fopen(file_path, "r");
		return;
	}

	if ((dp = opendir(_PATH_TMP)) == NULL) {
		return;
	}

	// It's OK to use snprintf in this routine since it should only be called by the clients
	// of stack logging, and thus calls to malloc are OK.
	snprintf(prefix_name, (size_t)PATH_MAX, "%s%d.", stack_log_file_base_name, pid);	// make sure to use "%s%d." rather than just "%s%d" to match the whole pid
	size_t prefix_length = strlen(prefix_name);

	while ( (entry = readdir(dp)) != NULL ) {
		if ( strncmp( entry->d_name, prefix_name, prefix_length) == 0 ) {
			snprintf(pathname, (size_t)PATH_MAX, "%s%s", _PATH_TMP, entry->d_name);
			char reference_file[PATH_MAX];
			if (log_file_is_reference(pathname, reference_file, (size_t)PATH_MAX)) {
				this_task_streams->index_file_stream = fopen(reference_file, "r");
			} else {
				this_task_streams->index_file_stream = fopen(pathname, "r");
			}
			break;
		}
	}
	closedir(dp);
}

static remote_task_file_streams*
retain_file_streams_for_task(task_t task, char* file_path)
{
	if (task == MACH_PORT_NULL) return NULL;

	OSSpinLockLock(&remote_fd_list_lock);

	// see if they're already in use
	uint32_t i;
	for (i = 0; i < remote_task_fd_count; i++) {
		if (remote_fds[i].remote_task == task) {
			remote_fds[i].in_use_count++;
			OSSpinLockUnlock(&remote_fd_list_lock);
			return &remote_fds[i];
		}
	}

	// open them
	uint32_t failures = 0;
	if (remote_task_fd_count == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
		while (remote_fds[next_remote_task_fd].in_use_count > 0) {
			next_remote_task_fd++;
			if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
			failures++;
			if (failures >= STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) {
				OSSpinLockUnlock(&remote_fd_list_lock);
				return NULL;
			}
		}
		fclose(remote_fds[next_remote_task_fd].index_file_stream);
		destroy_cache_for_file_streams(&remote_fds[next_remote_task_fd]);
	}

	pid_t pid;
	kern_return_t err = pid_for_task(task, &pid);
	if (err != KERN_SUCCESS) {
		OSSpinLockUnlock(&remote_fd_list_lock);
		return NULL;
	}

	remote_task_file_streams *this_task_streams = &remote_fds[next_remote_task_fd];

	open_log_files(pid, file_path, this_task_streams);

	// check if opens failed
	if (this_task_streams->index_file_stream == NULL) {
		if (this_task_streams->index_file_stream) fclose(this_task_streams->index_file_stream);
		OSSpinLockUnlock(&remote_fd_list_lock);
		return NULL;
	}

	// check if target pid is running 64-bit
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
	struct kinfo_proc processInfo;
	size_t bufsize = sizeof(processInfo);
	if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, (size_t)0) == 0 && bufsize > 0) {
		this_task_streams->task_is_64_bit = processInfo.kp_proc.p_flag & P_LP64;
	} else {
		this_task_streams->task_is_64_bit = 0;
	}

	// otherwise set vars and go
	this_task_streams->in_use_count = 1;
	this_task_streams->remote_task = task;
	this_task_streams->remote_pid = pid;
	next_remote_task_fd++;
	if (next_remote_task_fd == STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED) next_remote_task_fd = 0;
	remote_task_fd_count = MIN(remote_task_fd_count + 1, STACK_LOGGING_MAX_SIMUL_REMOTE_TASKS_INSPECTED);

	OSSpinLockUnlock(&remote_fd_list_lock);
	return this_task_streams;
}

static void
release_file_streams_for_task(task_t task)
{
	OSSpinLockLock(&remote_fd_list_lock);

	// decrement in-use count
	uint32_t i;
	for (i = 0; i < remote_task_fd_count; i++) {
		if (remote_fds[i].remote_task == task) {
			remote_fds[i].in_use_count--;
			break;
		}
	}

	OSSpinLockUnlock(&remote_fd_list_lock);
}

#pragma mark - extern

// The following is used by client tools like malloc_history and Instruments to pass along the path
// of the index file as read from the target task's __stack_log_file_path__ variable (set in this file).
// Eventually, at a suitable point, this additional argument should just be added to the other APIs below.
kern_return_t
__mach_stack_logging_set_file_path(task_t task, char* file_path)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task, file_path);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
1486 __mach_stack_logging_get_frames(task_t task
, mach_vm_address_t address
, mach_vm_address_t
*stack_frames_buffer
, uint32_t max_stack_frames
, uint32_t *count
)
1488 remote_task_file_streams
*remote_fd
= retain_file_streams_for_task(task
, NULL
);
1489 if (remote_fd
== NULL
) {
1490 return KERN_FAILURE
;
1493 update_cache_for_file_streams(remote_fd
);
1495 uint32_t collisions
= 0;
1496 size_t hash
= hash_index(address
, remote_fd
->cache
->cache_node_capacity
);
1497 size_t multiplier
= hash_multiplier(remote_fd
->cache
->cache_node_capacity
, remote_fd
->cache
->collision_allowance
);
1498 uint64_t located_file_position
= 0;
1502 if (remote_fd
->cache
->table_memory
[hash
].address
== address
) { // hit!
1503 located_file_position
= remote_fd
->cache
->table_memory
[hash
].index_file_offset
;
1506 } else if (remote_fd
->cache
->table_memory
[hash
].address
== 0ull) { // failure!
1511 hash
= next_hash(hash
, multiplier
, remote_fd
->cache
->cache_node_capacity
, collisions
);
1513 } while (collisions
<= remote_fd
->cache
->collision_allowance
);
	if (found) {
		// prepare for the read; the target process could be 32- or 64-bit.
		stack_logging_index_event32 *target_32_index = NULL;
		stack_logging_index_event64 *target_64_index = NULL;

		if (located_file_position >= remote_fd->cache->last_index_file_offset) {
			// must be in shared memory
			if (remote_fd->cache->shmem) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)(remote_fd->cache->snapshot.index_buffer + (located_file_position - remote_fd->cache->snapshot.start_index_offset));
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}

		} else {
			// it's written to disk
			char bufferSpace[128];

			size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
			fseeko(remote_fd->index_file_stream, (off_t)located_file_position, SEEK_SET);
			size_t read_count = fread(bufferSpace, read_size, (size_t)1, remote_fd->index_file_stream);
			if (read_count) {
				if (remote_fd->task_is_64_bit) {
					target_64_index = (stack_logging_index_event64*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_64_index->offset_and_flags);
				} else {
					target_32_index = (stack_logging_index_event32*)bufferSpace;
					located_file_position = STACK_LOGGING_OFFSET(target_32_index->offset_and_flags);
				}
			} else {
				found = false;
			}
		}
	}
	release_file_streams_for_task(task);

	if (!found) {
		return KERN_FAILURE;
	}

	return __mach_stack_logging_frames_for_uniqued_stack(task, located_file_position, stack_frames_buffer, max_stack_frames, count);
}
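
// To recap the lookup above: the per-task cache hashes a block address to the file
// offset of its most recent index event; that event's offset_and_flags field then
// yields (via STACK_LOGGING_OFFSET) the identifier of the uniqued backtrace, which
// __mach_stack_logging_frames_for_uniqued_stack expands into actual frames below.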
kern_return_t
__mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task, NULL);
	if (remote_fd == NULL) {
		return KERN_FAILURE;
	}

	bool reading_all_addresses = (address == 0);
	mach_stack_logging_record_t pass_record;
	kern_return_t err = KERN_SUCCESS;
	// update the cache (read the index file once and only once)
	update_cache_for_file_streams(remote_fd);

	FILE *the_index = (remote_fd->index_file_stream);

	// prepare for the read; the target process could be 32- or 64-bit.
	char bufferSpace[2048]; // 2 kb
	stack_logging_index_event32 *target_32_index = (stack_logging_index_event32*)bufferSpace;
	stack_logging_index_event64 *target_64_index = (stack_logging_index_event64*)bufferSpace;
	uint32_t target_addr_32 = (uint32_t)STACK_LOGGING_DISGUISE((uint32_t)address);
	uint64_t target_addr_64 = STACK_LOGGING_DISGUISE((uint64_t)address);
	size_t read_size = (remote_fd->task_is_64_bit ? sizeof(stack_logging_index_event64) : sizeof(stack_logging_index_event32));
	size_t number_slots = (size_t)(2048/read_size);
	uint64_t total_slots = remote_fd->cache->last_index_file_offset / read_size;

	// perform the search
	size_t read_count = 0;
	int64_t current_file_offset = 0;
	size_t i;
	do {
		// At this point we need to read index events: take them from the file until it is
		// exhausted, then grab the remainder from the shared memory snapshot, cropping
		// file reading to the point we had already scanned when entering this function.
		number_slots = (size_t)MIN(number_slots, total_slots);

		// if out of file to read (as of the time we entered this function), try the shared memory snapshot
		if (number_slots == 0) {
			if (remote_fd->cache->shmem && remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset > (uint64_t)current_file_offset) {
				// use shared memory
				target_32_index = (stack_logging_index_event32*)remote_fd->cache->snapshot.index_buffer;
				target_64_index = (stack_logging_index_event64*)remote_fd->cache->snapshot.index_buffer;
				read_count = (uint32_t)(remote_fd->cache->snapshot.start_index_offset + remote_fd->cache->snapshot.next_free_index_buffer_offset - current_file_offset) / read_size;
				current_file_offset += read_count * read_size;
			} else {
				break;	// nothing left anywhere; done
			}
		} else {
			// read and save the index events (the enumerator could modify the file position)
			fseeko(the_index, current_file_offset, SEEK_SET);
			read_count = fread(bufferSpace, read_size, number_slots, the_index);
			current_file_offset = ftello(the_index);
			total_slots -= read_count;
		}
		if (remote_fd->task_is_64_bit) {
			for (i = 0; i < read_count; i++) {
				if (reading_all_addresses || target_64_index[i].address == target_addr_64) {
					pass_record.address = STACK_LOGGING_DISGUISE(target_64_index[i].address);
					pass_record.argument = target_64_index[i].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_64_index[i].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_64_index[i].offset_and_flags);
					enumerator(pass_record, context);
				}
			}
		} else {
			for (i = 0; i < read_count; i++) {
				if (reading_all_addresses || target_32_index[i].address == target_addr_32) {
					pass_record.address = STACK_LOGGING_DISGUISE(target_32_index[i].address);
					pass_record.argument = target_32_index[i].argument;
					pass_record.stack_identifier = STACK_LOGGING_OFFSET(target_32_index[i].offset_and_flags);
					pass_record.type_flags = STACK_LOGGING_FLAGS(target_32_index[i].offset_and_flags);
					enumerator(pass_record, context);
				}
			}
		}
	} while (read_count);
	release_file_streams_for_task(task);
	return err;
}
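
// Illustrative enumerator callback (a sketch, not used by this file; the struct and
// function names are hypothetical, and the type bits are assumed to come from
// stack_logging.h). It tallies allocation vs. deallocation events.
#if 0
typedef struct { uint64_t allocs, deallocs; } event_totals_t;

static void
count_allocation_events(mach_stack_logging_record_t record, void *context)
{
	event_totals_t *totals = (event_totals_t *)context;
	if (record.type_flags & stack_logging_type_dealloc) totals->deallocs++;
	else if (record.type_flags & stack_logging_type_alloc) totals->allocs++;
}

// Usage: an address of 0 asks __mach_stack_logging_enumerate_records to visit
// the records for all addresses.
//	event_totals_t totals = { 0, 0 };
//	__mach_stack_logging_enumerate_records(task, 0, count_allocation_events, &totals);
#endif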
kern_return_t
__mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count)
{
	remote_task_file_streams *remote_fd = retain_file_streams_for_task(task, NULL);
	if (remote_fd == NULL) return KERN_FAILURE;

	__unwind_stack_from_table_index(&remote_fd->cache->uniquing_table, stack_identifier, stack_frames_buffer, count, max_stack_frames);

	release_file_streams_for_task(task);

	if (*count) return KERN_SUCCESS;
	else return KERN_FAILURE;
}
#ifdef TEST_DISK_STACK_LOGGING

// cc -o stack_logging_disk stack_logging_disk.c -DTEST_DISK_STACK_LOGGING

#include <sys/wait.h>
int
main()
{
	int status;
	int i;
	size_t total_globals = 0ul;

	fprintf(stderr, "master test process is %d\n", getpid());
	fprintf(stderr, "sizeof pre_write_buffers: %lu\n", sizeof(pre_write_buffers)); total_globals += sizeof(pre_write_buffers);
	fprintf(stderr, "sizeof stack_buffer: %lu\n", sizeof(stack_buffer)); total_globals += sizeof(stack_buffer);
	fprintf(stderr, "sizeof last_logged_malloc_address: %lu\n", sizeof(last_logged_malloc_address)); total_globals += sizeof(last_logged_malloc_address);
	fprintf(stderr, "sizeof stack_log_file_base_name: %lu\n", sizeof(stack_log_file_base_name)); total_globals += sizeof(stack_log_file_base_name);
	fprintf(stderr, "sizeof stack_log_file_suffix: %lu\n", sizeof(stack_log_file_suffix)); total_globals += sizeof(stack_log_file_suffix);
	fprintf(stderr, "sizeof stack_log_link_suffix: %lu\n", sizeof(stack_log_link_suffix)); total_globals += sizeof(stack_log_link_suffix);
	fprintf(stderr, "sizeof stack_log_location: %lu\n", (size_t)PATH_MAX); total_globals += (size_t)PATH_MAX;
	fprintf(stderr, "sizeof stack_log_reference_file: %lu\n", (size_t)PATH_MAX); total_globals += (size_t)PATH_MAX;
	fprintf(stderr, "sizeof __stack_log_file_path__ (index_file_path): %lu\n", (size_t)PATH_MAX); total_globals += (size_t)PATH_MAX;
	fprintf(stderr, "sizeof index_file_descriptor: %lu\n", sizeof(index_file_descriptor)); total_globals += sizeof(index_file_descriptor);
	fprintf(stderr, "sizeof remote_fds: %lu\n", sizeof(remote_fds)); total_globals += sizeof(remote_fds);
	fprintf(stderr, "sizeof next_remote_task_fd: %lu\n", sizeof(next_remote_task_fd)); total_globals += sizeof(next_remote_task_fd);
	fprintf(stderr, "sizeof remote_task_fd_count: %lu\n", sizeof(remote_task_fd_count)); total_globals += sizeof(remote_task_fd_count);
	fprintf(stderr, "sizeof remote_fd_list_lock: %lu\n", sizeof(remote_fd_list_lock)); total_globals += sizeof(remote_fd_list_lock);
	fprintf(stderr, "sizeof logging_use_compaction: %lu\n", sizeof(logging_use_compaction)); total_globals += sizeof(logging_use_compaction);

	fprintf(stderr, "size of all global data: %lu\n", total_globals);
	// create a few child processes and exit them cleanly, so their logs should get cleaned up
	fprintf(stderr, "\ncreating child processes and exiting cleanly\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_file();	// child creates its own log file
			fprintf(stderr, "exiting child process %d\n", getpid());
			exit(0);	// a clean exit should let the log get cleaned up
		}
		wait(&status);
	}
	// create a few child processes and abruptly _exit them, leaving their logs around
	fprintf(stderr, "\ncreating child processes and exiting abruptly, leaving logs around\n");
	for (i = 0; i < 3; i++) {
		if (fork() == 0) {
			fprintf(stderr, "\nin child process %d\n", getpid());
			create_log_file();	// child creates its own log file
			fprintf(stderr, "exiting child process %d\n", getpid());
			_exit(0);	// an abrupt exit skips cleanup, leaving the log behind
		}
		wait(&status);
	}
	// this should reap any remaining logs
	fprintf(stderr, "\nexiting master test process %d\n", getpid());
	return 0;
}

#endif