#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdbool.h>
#include <errno.h>
#include <sys/time.h>
#include <libkern/OSAtomic.h>
#include <dispatch/dispatch.h>

#include "darwintest_defaults.h"

#define NUM_THREADS 8
#define RDAR_38144536 1

struct context {
	pthread_cond_t cond;
	pthread_mutex_t mutex;
	long udelay;
	long count;
};

static void *wait_thread(void *ptr) {
	int res;
	struct context *context = ptr;

	bool loop = true;
	while (loop) {
		struct timespec ts;
		struct timeval tv;
		gettimeofday(&tv, NULL);
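		// Turn "now + udelay microseconds" into an absolute deadline,
		// carrying any microsecond overflow into tv_sec before the
		// timeval is converted to the timespec that
		// pthread_cond_timedwait() expects.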
		tv.tv_sec += (tv.tv_usec + context->udelay) / (__typeof(tv.tv_sec)) USEC_PER_SEC;
		tv.tv_usec = (tv.tv_usec + context->udelay) % (__typeof(tv.tv_usec)) USEC_PER_SEC;
		TIMEVAL_TO_TIMESPEC(&tv, &ts);

		res = pthread_mutex_lock(&context->mutex);
		if (res) {
			fprintf(stderr, "[%ld] pthread_mutex_lock: %s\n", context->count, strerror(res));
			abort();
		}

		if (context->count > 0) {
			res = pthread_cond_timedwait(&context->cond, &context->mutex, &ts);
			if (res != ETIMEDOUT) {
				fprintf(stderr, "[%ld] pthread_cond_timedwait: %s\n", context->count, strerror(res));
				abort();
			}
			--context->count;
		} else {
			loop = false;
		}

		res = pthread_mutex_unlock(&context->mutex);
		if (res) {
			fprintf(stderr, "[%ld] pthread_mutex_unlock: %s\n", context->count, strerror(res));
			abort();
		}
	}

	return NULL;
}

T_DECL(cond_timedwait_timeout, "pthread_cond_timedwait() timeout")
{
	// This testcase launches 8 threads that all perform a timed wait, in a
	// loop, on the same condition variable, which is never signaled. After a
	// total of 8000 timeouts all threads finish, and the testcase logs the
	// expected time (5[ms] * 8000[timeouts] / 8[threads] = 5s) versus the
	// elapsed time.
	struct context context = {
		.cond = PTHREAD_COND_INITIALIZER,
		.mutex = PTHREAD_MUTEX_INITIALIZER,
		.udelay = 5000,
		.count = 8000,
	};

	long uexpected = (context.udelay * context.count) / NUM_THREADS;
	T_LOG("waittime expected: %ld us", uexpected);
	struct timeval start, end;
	gettimeofday(&start, NULL);

	pthread_t p[NUM_THREADS];
	for (int i = 0; i < NUM_THREADS; ++i) {
		T_ASSERT_POSIX_ZERO(pthread_create(&p[i], NULL, wait_thread, &context),
				"pthread_create");
	}

	usleep((useconds_t) uexpected);
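	// The expected wait time has elapsed; now poll context.count under the
	// mutex until the last timed wait has expired.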
	bool loop = true;
	while (loop) {
		T_QUIET; T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&context.mutex),
				"pthread_mutex_lock");
		if (context.count <= 0) {
			loop = false;
		}
		T_QUIET; T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&context.mutex),
				"pthread_mutex_unlock");
	}

	for (int i = 0; i < NUM_THREADS; ++i) {
		T_ASSERT_POSIX_ZERO(pthread_join(p[i], NULL), "pthread_join");
	}

	gettimeofday(&end, NULL);
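	// Elapsed wall-clock time in microseconds, from the two
	// gettimeofday() samples.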
	uint64_t uelapsed =
			((uint64_t) end.tv_sec * USEC_PER_SEC + (uint64_t) end.tv_usec) -
			((uint64_t) start.tv_sec * USEC_PER_SEC + (uint64_t) start.tv_usec);
	T_LOG("waittime actual: %llu us", uelapsed);
}

struct prodcons_context {
	pthread_cond_t cond;
	pthread_mutex_t mutex;
	bool consumer_ready;
	bool workitem_available;
	bool padding[6];
};

static void *consumer_thread(void *ptr) {
	struct prodcons_context *context = ptr;

	// tell producer thread that we are ready
	T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&context->mutex), "pthread_mutex_lock");

	context->consumer_ready = true;
	T_ASSERT_POSIX_ZERO(pthread_cond_signal(&context->cond), "pthread_cond_signal");

	// wait for a work item to become available
	do {
		// mutex will be dropped and allow producer thread to acquire
		T_ASSERT_POSIX_ZERO(pthread_cond_wait(&context->cond, &context->mutex), "pthread_cond_wait");

		// loop in case of spurious wakeups
	} while (context->workitem_available == false);

	// work item has been sent, so dequeue it and tell producer
	context->workitem_available = false;
	T_ASSERT_POSIX_ZERO(pthread_cond_signal(&context->cond), "pthread_cond_signal");

	// unlock mutex, we are done here
	T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&context->mutex), "pthread_mutex_unlock");

	T_PASS("Consumer thread exiting");

	return NULL;
}

#define TESTCASE_TIMEOUT (10) /* seconds */
typedef enum {
	eNullTimeout,
	eZeroTimeout,
	eBeforeEpochTimeout,
	eRecentPastTimeout
} TimeOutType;

static DT_TEST_RETURN cond_timedwait_timeouts_internal(TimeOutType timeout, bool relative);

T_DECL(cond_timedwait_nulltimeout, "pthread_cond_timedwait() with NULL timeout, ensure mutex is unlocked")
{
	cond_timedwait_timeouts_internal(eNullTimeout, false);
}

T_DECL(cond_timedwait_zerotimeout, "pthread_cond_timedwait() with zero timeout, ensure mutex is unlocked")
{
#if RDAR_38144536
	T_SKIP("skipped <rdar://38144536>");
#else // RDAR_38144536
	cond_timedwait_timeouts_internal(eZeroTimeout, false);
#endif // RDAR_38144536
}

T_DECL(cond_timedwait_beforeepochtimeout, "pthread_cond_timedwait() with timeout before the epoch, ensure mutex is unlocked")
{
#if RDAR_38144536
	T_SKIP("skipped <rdar://38144536>");
#else // RDAR_38144536
	cond_timedwait_timeouts_internal(eBeforeEpochTimeout, false);
#endif // RDAR_38144536
}

T_DECL(cond_timedwait_pasttimeout, "pthread_cond_timedwait() with timeout in the past, ensure mutex is unlocked")
{
#if RDAR_38144536
	T_SKIP("skipped <rdar://38144536>");
#else // RDAR_38144536
	cond_timedwait_timeouts_internal(eRecentPastTimeout, false);
#endif // RDAR_38144536
}

T_DECL(cond_timedwait_relative_nulltimeout, "pthread_cond_timedwait_relative_np() with relative NULL timeout, ensure mutex is unlocked")
{
	cond_timedwait_timeouts_internal(eNullTimeout, true);
}

T_DECL(cond_timedwait_relative_pasttimeout, "pthread_cond_timedwait_relative_np() with relative timeout in the past, ensure mutex is unlocked")
{
	cond_timedwait_timeouts_internal(eRecentPastTimeout, true);
}

static DT_TEST_RETURN cond_timedwait_timeouts_internal(TimeOutType timeout, bool relative)
{
	// This testcase mimics a producer-consumer model where the consumer checks
	// in and waits until work becomes available. The producer then waits until
	// the work has been consumed and the consumer quiesces. Since condition
	// variables may have spurious wakeups, the timeout should not matter,
	// but there have been functional issues where the mutex would not be
	// unlocked for a timeout in the past.
	struct prodcons_context context = {
		.cond = PTHREAD_COND_INITIALIZER,
		.mutex = PTHREAD_MUTEX_INITIALIZER,
		.consumer_ready = false,
		.workitem_available = false
	};

	struct timeval test_timeout;
	gettimeofday(&test_timeout, NULL);
	test_timeout.tv_sec += TESTCASE_TIMEOUT;

	T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&context.mutex), "pthread_mutex_lock");

	pthread_t p;
	T_ASSERT_POSIX_ZERO(pthread_create(&p, NULL, consumer_thread, &context),
			"pthread_create");

	// Wait until the consumer thread is able to acquire the mutex, check in,
	// and block in its own condition variable. We do not want to start
	// generating work before the consumer thread is available.
	do {
		// mutex will be dropped and allow consumer thread to acquire
		T_ASSERT_POSIX_ZERO(pthread_cond_wait(&context.cond, &context.mutex), "pthread_cond_wait");

		// loop in case of spurious wakeups
	} while (context.consumer_ready == false);

	// consumer is ready and blocked in its own condition variable, and
	// producer has mutex acquired. Send a work item and wait for it
	// to be dequeued

	context.workitem_available = true;
	T_ASSERT_POSIX_ZERO(pthread_cond_signal(&context.cond), "pthread_cond_signal");

	do {
		struct timeval now;

		gettimeofday(&now, NULL);
		T_QUIET; T_ASSERT_TRUE(timercmp(&now, &test_timeout, <), "timeout reached waiting for consumer thread to consume");

		struct timespec ts;

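		// Build the requested timeout flavor in ts; for eNullTimeout the
		// wait below is passed NULL instead. The eZeroTimeout and
		// eBeforeEpochTimeout cases are never exercised in relative mode
		// by the T_DECLs above, so they are left empty there.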
		if (relative) {
			switch (timeout) {
			case eNullTimeout:
				break;
			case eRecentPastTimeout:
				ts.tv_sec = -1;
				ts.tv_nsec = 0;
				break;
			case eZeroTimeout:
			case eBeforeEpochTimeout:
				break;
			}
		} else {
			switch (timeout) {
			case eNullTimeout:
				break;
			case eZeroTimeout:
				ts.tv_sec = 0;
				ts.tv_nsec = 0;
				break;
			case eBeforeEpochTimeout:
				ts.tv_sec = -1;
				ts.tv_nsec = 0;
				break;
			case eRecentPastTimeout:
				ts.tv_sec = now.tv_sec - 1;
				ts.tv_nsec = now.tv_usec * 1000; // microseconds to nanoseconds
				break;
			}
		}

		int ret;
		if (relative) {
			ret = pthread_cond_timedwait_relative_np(&context.cond, &context.mutex, timeout == eNullTimeout ? NULL : &ts);
		} else {
			ret = pthread_cond_timedwait(&context.cond, &context.mutex, timeout == eNullTimeout ? NULL : &ts);
		}
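		// A normal wakeup (0), EINTR, and ETIMEDOUT are all acceptable
		// outcomes here; any other error fails the test.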
		if (ret != 0 && ret != EINTR && ret != ETIMEDOUT) T_ASSERT_POSIX_ZERO(ret, "timedwait returned error");

		usleep(10*1000); // avoid spinning in a CPU-bound loop

		// loop in case of spurious wakeups
	} while (context.workitem_available == true);

	T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&context.mutex), "pthread_mutex_unlock");

	T_ASSERT_POSIX_ZERO(pthread_join(p, NULL), "pthread_join");

	T_PASS("Consumer completed work");
}