/* darwintest: exercises the per-task decompressions counter (TASK_VM_INFO) via the freezer. */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

#include <sys/kern_memorystatus.h>
#include <sys/sysctl.h>
#include <sys/wait.h>

#include <mach/task_info.h>
#include <mach/mach.h>

#include <darwintest.h>
#include <TargetConditionals.h>
14 | ||
#define KB 1024
#define MALLOC_SIZE_PER_THREAD (64 * KB)
#define freezer_path "/usr/local/bin/freeze"

/* BridgeOS could spend more time execv freezer */
#if TARGET_OS_BRIDGE
static int timeout = 600;
#else
static int timeout = 120;
#endif

/* Shared state coordinating worker threads with the main test body. */
static _Atomic int thread_malloc_count = 0;  /* workers done dirtying pages */
static _Atomic int thread_thawed_count = 0;  /* workers done re-faulting pages */
static _Atomic int phase = 0;                /* 0: setup, 1: dirty, 2: thawed */

/* Per-thread argument handed to worker_thread_function(). */
struct thread_args {
	int id;
};
33 | ||
34 | static void | |
35 | freeze_pid(pid_t pid) | |
36 | { | |
37 | char pid_str[6]; | |
38 | char *args[3]; | |
39 | pid_t child_pid; | |
40 | int status; | |
41 | ||
42 | sprintf(pid_str, "%d", pid); | |
43 | child_pid = fork(); | |
44 | if (child_pid == 0) { | |
45 | /* Launch freezer */ | |
46 | args[0] = freezer_path; | |
47 | args[1] = pid_str; | |
48 | args[2] = NULL; | |
49 | execv(freezer_path, args); | |
50 | /* execve() does not return on success */ | |
51 | perror("execve"); | |
52 | T_FAIL("execve() failed"); | |
53 | } | |
54 | ||
55 | /* Wait for freezer to complete */ | |
56 | T_LOG("Waiting for freezer %d to complete", child_pid); | |
57 | while (0 == waitpid(child_pid, &status, WNOHANG)) { | |
58 | if (timeout < 0) { | |
59 | kill(child_pid, SIGKILL); | |
60 | T_FAIL("Freezer took too long to freeze the test"); | |
61 | } | |
62 | sleep(1); | |
63 | timeout--; | |
64 | } | |
65 | if (WIFEXITED(status) != 1 || WEXITSTATUS(status) != 0) { | |
66 | T_FAIL("Freezer error'd out"); | |
67 | } | |
68 | } | |
69 | static void * | |
70 | worker_thread_function(void *args) | |
71 | { | |
72 | struct thread_args *targs = args; | |
73 | int thread_id = targs->id; | |
74 | char *array; | |
75 | ||
76 | /* Allocate memory */ | |
77 | array = malloc(MALLOC_SIZE_PER_THREAD); | |
78 | T_EXPECT_NOTNULL(array, "thread %d allocated heap memory to be dirtied", thread_id); | |
79 | ||
80 | /* Waiting for phase 1 (touch pages) to start */ | |
81 | while (atomic_load(&phase) != 1) { | |
82 | ; | |
83 | } | |
84 | ||
85 | /* Phase 1: touch pages */ | |
86 | T_LOG("thread %d phase 1: dirtying %d heap pages (%d bytes)", thread_id, MALLOC_SIZE_PER_THREAD / (int)PAGE_SIZE, MALLOC_SIZE_PER_THREAD); | |
87 | memset(&array[0], 1, MALLOC_SIZE_PER_THREAD); | |
88 | atomic_fetch_add(&thread_malloc_count, 1); | |
89 | ||
90 | /* Wait for process to be frozen */ | |
91 | while (atomic_load(&phase) != 2) { | |
92 | ; | |
93 | } | |
94 | ||
95 | /* Phase 2, process thawed, trigger decompressions by re-faulting pages */ | |
96 | T_LOG("thread %d phase 2: faulting pages back in to trigger decompressions", thread_id); | |
97 | memset(&array[0], 1, MALLOC_SIZE_PER_THREAD); | |
98 | ||
99 | /* Main thread will retrieve vm statistics once all threads are thawed */ | |
100 | atomic_fetch_add(&thread_thawed_count, 1); | |
101 | ||
102 | free(array); | |
103 | ||
104 | ||
105 | #if 0 /* Test if the thread's decompressions counter was added to the task decompressions counter when a thread terminates */ | |
106 | if (thread_id < 2) { | |
107 | sleep(10); | |
108 | } | |
109 | #endif | |
110 | ||
111 | return NULL; | |
112 | } | |
113 | ||
114 | static pthread_t* | |
115 | create_threads(int nthreads, pthread_t *threads, struct thread_args *targs) | |
116 | { | |
117 | int i; | |
118 | int err; | |
119 | pthread_attr_t attr; | |
120 | ||
121 | err = pthread_attr_init(&attr); | |
122 | T_ASSERT_POSIX_ZERO(err, "pthread_attr_init"); | |
123 | for (i = 0; i < nthreads; i++) { | |
124 | targs[i].id = i; | |
125 | err = pthread_create(&threads[i], &attr, worker_thread_function, (void*)&targs[i]); | |
126 | T_QUIET; T_ASSERT_POSIX_ZERO(err, "pthread_create"); | |
127 | } | |
128 | ||
129 | return threads; | |
130 | } | |
131 | ||
132 | static void | |
133 | join_threads(int nthreads, pthread_t *threads) | |
134 | { | |
135 | int i; | |
136 | int err; | |
137 | ||
138 | for (i = 0; i < nthreads; i++) { | |
139 | err = pthread_join(threads[i], NULL); | |
140 | T_QUIET; T_ASSERT_POSIX_ZERO(err, "pthread_join"); | |
141 | } | |
142 | } | |
143 | ||
/*
 * End-to-end test of the per-task decompressions counter:
 *   - verify the counter is 0, spawn one worker per CPU, have each dirty
 *     64KB of heap, freeze the process, thaw it by re-faulting the pages,
 *     then verify TASK_VM_INFO reports at least that many decompressions.
 */
T_DECL(task_vm_info_decompressions,
    "Test multithreaded per-task decompressions counter")
{
	int err;
	int ncpu;
	size_t ncpu_size = sizeof(ncpu);
	int npages;
	int compressor_mode = 0; /* initialized: only valid if the sysctl succeeds */
	size_t compressor_mode_size = sizeof(compressor_mode);
	task_vm_info_data_t vm_info;
	mach_msg_type_number_t count;
	pthread_t *threads;
	struct thread_args *targs;

	T_SETUPBEGIN;

	/* Make sure freezer is enabled on target machine */
	err = sysctlbyname("vm.compressor_mode", &compressor_mode, &compressor_mode_size, NULL, 0);
	/* the original read compressor_mode without checking the sysctl result */
	T_QUIET; T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(vm.compressor_mode)");
	if (compressor_mode < 8) {
		T_SKIP("This test requires freezer which is not available on the testing platform (vm.compressor_mode is set to %d)", compressor_mode);
	}
#if TARGET_OS_BRIDGE
	T_SKIP("This test requires freezer which is not available on bridgeOS (vm.compressor_mode is set to %d)", compressor_mode);
#endif

	/* Set number of threads to ncpu available on testing device */
	err = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0);
	T_EXPECT_EQ_INT(0, err, "Detected %d cpus\n", ncpu);

	/* Set total number of pages to be frozen */
	npages = ncpu * MALLOC_SIZE_PER_THREAD / (int)PAGE_SIZE;
	T_LOG("Test will be freezing at least %d heap pages\n", npages);

	/* Change state to freezable */
	err = memorystatus_control(MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE, getpid(), (uint32_t)1, NULL, 0);
	T_EXPECT_EQ(KERN_SUCCESS, err, "set pid %d to be freezable", getpid());

	/* Call into kernel to retrieve vm_info and make sure we do not have any decompressions before the test */
	count = TASK_VM_INFO_COUNT;
	err = task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)&vm_info, &count);
	T_EXPECT_EQ(count, TASK_VM_INFO_COUNT, "count == TASK_VM_INFO_COUNT: %d", count);
	T_EXPECT_EQ_INT(0, err, "task_info(TASK_VM_INFO) returned 0");
	T_EXPECT_EQ_INT(0, vm_info.decompressions, "Expected 0 decompressions before test starts");

	/* Thread data (the original never checked these allocations) */
	threads = malloc(sizeof(pthread_t) * (size_t)ncpu);
	T_QUIET; T_ASSERT_NOTNULL(threads, "allocated pthread_t array for %d threads", ncpu);
	targs = malloc(sizeof(struct thread_args) * (size_t)ncpu);
	T_QUIET; T_ASSERT_NOTNULL(targs, "allocated thread_args array for %d threads", ncpu);

	T_SETUPEND;

	/* Phase 1: create threads to write to malloc memory */
	create_threads(ncpu, threads, targs);
	atomic_fetch_add(&phase, 1);

	/* Wait for all threads to dirty their malloc pages */
	while (atomic_load(&thread_malloc_count) != ncpu) {
		sleep(1);
	}
	T_EXPECT_EQ(ncpu, atomic_load(&thread_malloc_count), "%d threads finished writing to malloc pages\n", ncpu);

	/* Launch freezer to compress the dirty pages */
	T_LOG("Running freezer to compress pages for pid %d", getpid());
	freeze_pid(getpid());

	/* Phase 2: trigger decompression in threads */
	atomic_fetch_add(&phase, 1);

	/* Wait for all threads to decompress their malloc pages */
	while (atomic_load(&thread_thawed_count) != ncpu) {
		sleep(1);
	}

	/* Phase 3: Call into kernel to retrieve vm_info and to get the updated decompressions counter */
	count = TASK_VM_INFO_COUNT;
	err = task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)&vm_info, &count);
	T_EXPECT_EQ(count, TASK_VM_INFO_COUNT, "count == TASK_VM_INFO_COUNT: %d", count);
	T_EXPECT_EQ(0, err, "task_info(TASK_VM_INFO) returned 0");

	/* Make sure this task has decompressed at least all of the dirtied memory */
	T_EXPECT_GE_INT(vm_info.decompressions, npages, "decompressed %d pages (>= heap pages: %d)", vm_info.decompressions, npages);
	T_PASS("Correctly retrieve per-task decompressions stats");

	/* Cleanup */
	join_threads(ncpu, threads);
	free(threads);
	free(targs);
}