3 #include <mach/task_info.h>
8 #include <sys/kern_memorystatus.h>
9 #include <sys/sysctl.h>
10 #include <stdatomic.h>
12 #include <darwintest.h>
13 #include <TargetConditionals.h>
/* NOTE(review): KB was missing from the garbled extraction; 1024 assumed — confirm. */
#define KB 1024
/* Heap each worker thread allocates and dirties (64 KiB = 16 pages at 4K). */
#define MALLOC_SIZE_PER_THREAD (64 * KB)
/* External helper binary that freezes (compresses) a target pid. */
#define freezer_path "/usr/local/bin/freeze"

/* BridgeOS could spend more time execv freezer */
/* NOTE(review): the #if guard was missing from the extraction; TARGET_OS_BRIDGE
 * inferred from the comment and the TargetConditionals.h include — confirm. */
#if TARGET_OS_BRIDGE
static int timeout = 600;
#else
static int timeout = 120;
#endif
/*
 * Cross-thread coordination state (C11 atomics, default seq_cst ordering).
 * The main thread advances `phase`; workers spin on it and report progress
 * through the two counters.
 */
static _Atomic int thread_malloc_count = 0;  /* workers done dirtying their pages */
static _Atomic int thread_thawed_count = 0;  /* workers done re-faulting after thaw */
static _Atomic int phase = 0;                /* 0 = setup, 1 = dirty pages, 2 = thawed */
42 sprintf(pid_str
, "%d", pid
);
46 args
[0] = freezer_path
;
49 execv(freezer_path
, args
);
50 /* execve() does not return on success */
52 T_FAIL("execve() failed");
55 /* Wait for freezer to complete */
56 T_LOG("Waiting for freezer %d to complete", child_pid
);
57 while (0 == waitpid(child_pid
, &status
, WNOHANG
)) {
59 kill(child_pid
, SIGKILL
);
60 T_FAIL("Freezer took too long to freeze the test");
65 if (WIFEXITED(status
) != 1 || WEXITSTATUS(status
) != 0) {
66 T_FAIL("Freezer error'd out");
70 worker_thread_function(void *args
)
72 struct thread_args
*targs
= args
;
73 int thread_id
= targs
->id
;
77 array
= malloc(MALLOC_SIZE_PER_THREAD
);
78 T_EXPECT_NOTNULL(array
, "thread %d allocated heap memory to be dirtied", thread_id
);
80 /* Waiting for phase 1 (touch pages) to start */
81 while (atomic_load(&phase
) != 1) {
85 /* Phase 1: touch pages */
86 T_LOG("thread %d phase 1: dirtying %d heap pages (%d bytes)", thread_id
, MALLOC_SIZE_PER_THREAD
/ (int)PAGE_SIZE
, MALLOC_SIZE_PER_THREAD
);
87 memset(&array
[0], 1, MALLOC_SIZE_PER_THREAD
);
88 atomic_fetch_add(&thread_malloc_count
, 1);
90 /* Wait for process to be frozen */
91 while (atomic_load(&phase
) != 2) {
95 /* Phase 2, process thawed, trigger decompressions by re-faulting pages */
96 T_LOG("thread %d phase 2: faulting pages back in to trigger decompressions", thread_id
);
97 memset(&array
[0], 1, MALLOC_SIZE_PER_THREAD
);
99 /* Main thread will retrieve vm statistics once all threads are thawed */
100 atomic_fetch_add(&thread_thawed_count
, 1);
105 #if 0 /* Test if the thread's decompressions counter was added to the task decompressions counter when a thread terminates */
115 create_threads(int nthreads
, pthread_t
*threads
, struct thread_args
*targs
)
121 err
= pthread_attr_init(&attr
);
122 T_ASSERT_POSIX_ZERO(err
, "pthread_attr_init");
123 for (i
= 0; i
< nthreads
; i
++) {
125 err
= pthread_create(&threads
[i
], &attr
, worker_thread_function
, (void*)&targs
[i
]);
126 T_QUIET
; T_ASSERT_POSIX_ZERO(err
, "pthread_create");
133 join_threads(int nthreads
, pthread_t
*threads
)
138 for (i
= 0; i
< nthreads
; i
++) {
139 err
= pthread_join(threads
[i
], NULL
);
140 T_QUIET
; T_ASSERT_POSIX_ZERO(err
, "pthread_join");
144 T_DECL(task_vm_info_decompressions
,
145 "Test multithreaded per-task decompressions counter")
149 size_t ncpu_size
= sizeof(ncpu
);
152 size_t compressor_mode_size
= sizeof(compressor_mode
);
153 task_vm_info_data_t vm_info
;
154 mach_msg_type_number_t count
;
156 struct thread_args
*targs
;
160 /* Make sure freezer is enabled on target machine */
161 err
= sysctlbyname("vm.compressor_mode", &compressor_mode
, &compressor_mode_size
, NULL
, 0);
162 if (compressor_mode
< 8) {
163 T_SKIP("This test requires freezer which is not available on the testing platform (vm.compressor_mode is set to %d)", compressor_mode
);
166 T_SKIP("This test requires freezer which is not available on bridgeOS (vm.compressor_mode is set to %d)", compressor_mode
);
169 /* Set number of threads to ncpu available on testing device */
170 err
= sysctlbyname("hw.ncpu", &ncpu
, &ncpu_size
, NULL
, 0);
171 T_EXPECT_EQ_INT(0, err
, "Detected %d cpus\n", ncpu
);
173 /* Set total number of pages to be frozen */
174 npages
= ncpu
* MALLOC_SIZE_PER_THREAD
/ (int)PAGE_SIZE
;
175 T_LOG("Test will be freezing at least %d heap pages\n", npages
);
177 /* Change state to freezable */
178 err
= memorystatus_control(MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE
, getpid(), (uint32_t)1, NULL
, 0);
179 T_EXPECT_EQ(KERN_SUCCESS
, err
, "set pid %d to be freezable", getpid());
181 /* Call into kernel to retrieve vm_info and make sure we do not have any decompressions before the test */
182 count
= TASK_VM_INFO_COUNT
;
183 err
= task_info(mach_task_self(), TASK_VM_INFO
, (task_info_t
)&vm_info
, &count
);
184 T_EXPECT_EQ(count
, TASK_VM_INFO_COUNT
, "count == TASK_VM_INFO_COUNT: %d", count
);
185 T_EXPECT_EQ_INT(0, err
, "task_info(TASK_VM_INFO) returned 0");
186 T_EXPECT_EQ_INT(0, vm_info
.decompressions
, "Expected 0 decompressions before test starts");
189 threads
= malloc(sizeof(pthread_t
) * (size_t)ncpu
);
190 targs
= malloc(sizeof(struct thread_args
) * (size_t)ncpu
);
194 /* Phase 1: create threads to write to malloc memory */
195 create_threads(ncpu
, threads
, targs
);
196 atomic_fetch_add(&phase
, 1);
198 /* Wait for all threads to dirty their malloc pages */
199 while (atomic_load(&thread_malloc_count
) != ncpu
) {
202 T_EXPECT_EQ(ncpu
, atomic_load(&thread_malloc_count
), "%d threads finished writing to malloc pages\n", ncpu
);
204 /* Launch freezer to compress the dirty pages */
205 T_LOG("Running freezer to compress pages for pid %d", getpid());
206 freeze_pid(getpid());
208 /* Phase 2: triger decompression in threads */
209 atomic_fetch_add(&phase
, 1);
211 /* Wait for all threads to decompress their malloc pages */
212 while (atomic_load(&thread_thawed_count
) != ncpu
) {
216 /* Phase 3: Call into kernel to retrieve vm_info and to get the updated decompressions counter */
217 count
= TASK_VM_INFO_COUNT
;
218 err
= task_info(mach_task_self(), TASK_VM_INFO
, (task_info_t
)&vm_info
, &count
);
219 T_EXPECT_EQ(count
, TASK_VM_INFO_COUNT
, "count == TASK_VM_INFO_COUNT: %d", count
);
220 T_EXPECT_EQ(0, err
, "task_info(TASK_VM_INFO) returned 0");
222 /* Make sure this task has decompressed at least all of the dirtied memory */
223 T_EXPECT_GE_INT(vm_info
.decompressions
, npages
, "decompressed %d pages (>= heap pages: %d)", vm_info
.decompressions
, npages
);
224 T_PASS("Correctly retrieve per-task decompressions stats");
227 join_threads(ncpu
, threads
);