]> git.saurik.com Git - apple/xnu.git/blob - tools/tests/libMicro/cascade_cond.c
xnu-3248.60.10.tar.gz
[apple/xnu.git] / tools / tests / libMicro / cascade_cond.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms
5 * of the Common Development and Distribution License
6 * (the "License"). You may not use this file except
7 * in compliance with the License.
8 *
9 * You can obtain a copy of the license at
10 * src/OPENSOLARIS.LICENSE
11 * or http://www.opensolaris.org/os/licensing.
12 * See the License for the specific language governing
13 * permissions and limitations under the License.
14 *
15 * When distributing Covered Code, include this CDDL
16 * HEADER in each file and include the License file at
17 * usr/src/OPENSOLARIS.LICENSE. If applicable,
18 * add the following below this CDDL HEADER, with the
19 * fields enclosed by brackets "[]" replaced with your
20 * own identifying information: Portions Copyright [yyyy]
21 * [name of copyright owner]
22 *
23 * CDDL HEADER END
24 */
25
26 /*
27 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
28 * Use is subject to license terms.
29 */
30
31 /*
32 * The "cascade" test case is a multiprocess/multithread batten-passing model
33 * using lock primitives alone for synchronisation. Threads are arranged in a
34 * ring. Each thread has two locks of its own on which it blocks, and is able
35 * to manipulate the two locks belonging to the thread which follows it in the
36 * ring.
37 *
38 * The number of threads (nthreads) is specified by the generic libMicro -P/-T
39 * options. With nthreads == 1 (the default) the uncontended case can be timed.
40 *
41 * The main logic is generic and allows any simple blocking API to be tested.
42 * The API-specific component is clearly indicated.
43 */
44
45 #include <unistd.h>
46 #include <stdlib.h>
47 #include <stdio.h>
48 #include <pthread.h>
49 #include <sys/mman.h>
50
51 #include "libmicro.h"
52
/*
 * Per-thread state. Each thread owns two locks (us0/us1) that it blocks
 * on, and manipulates the two locks (them0/them1) of the next thread in
 * the ring.
 */
typedef struct {
	int			ts_once;	/* set after one-time index setup */
	int			ts_id;		/* this thread's position in the ring */
	int			ts_us0;		/* our lock indices */
	int			ts_us1;
	int			ts_them0;	/* their lock indices */
	int			ts_them1;
} tsd_t;
61
static int			nthreads;	/* total threads in the ring (lm_optP * lm_optT) */

/*
 * API-specific code BEGINS here
 */

static int			opto = 0;	/* -o: signal after dropping the mutex */
static int			opts = 0;	/* -s: force PTHREAD_PROCESS_SHARED */
static int			nlocks;		/* 2 * nthreads; one pair of locks per thread */
static pthread_mutex_t		*mxs;		/* shared mutex array, one per lock slot */
static pthread_cond_t		*cvs;		/* shared condvar array, one per lock slot */
static int			*conds;		/* condition words guarded by mxs[] */
74
75 int
76 benchmark_init()
77 {
78 lm_tsdsize = sizeof (tsd_t);
79
80 (void) sprintf(lm_optstr, "os");
81
82 lm_defN = "cscd_cond";
83
84 (void) sprintf(lm_usage,
85 " [-o] (do signal outside mutex)\n"
86 " [-s] (force PTHREAD_PROCESS_SHARED)\n"
87 "notes: thread cascade using pthread_conds\n");
88
89 return (0);
90 }
91
92 /*ARGSUSED*/
93 int
94 benchmark_optswitch(int opt, char *optarg)
95 {
96 switch (opt) {
97 case 'o':
98 opto = 1;
99 break;
100 case 's':
101 opts = 1;
102 break;
103 default:
104 return (-1);
105 }
106 return (0);
107 }
108
109 int
110 benchmark_initrun()
111 {
112 int i;
113 int e = 0;
114 pthread_mutexattr_t ma;
115 pthread_condattr_t ca;
116
117 nthreads = lm_optP * lm_optT;
118 nlocks = nthreads * 2;
119 /*LINTED*/
120 mxs = (pthread_mutex_t *)mmap(NULL,
121 nlocks * sizeof (pthread_mutex_t),
122 PROT_READ | PROT_WRITE,
123 MAP_ANON | MAP_SHARED,
124 -1, 0L);
125 if (mxs == MAP_FAILED) {
126 return (1);
127 }
128
129 /*LINTED*/
130 cvs = (pthread_cond_t *)mmap(NULL,
131 nlocks * sizeof (pthread_cond_t),
132 PROT_READ | PROT_WRITE,
133 MAP_ANON | MAP_SHARED,
134 -1, 0L);
135 if (cvs == MAP_FAILED) {
136 return (1);
137 }
138
139 /*LINTED*/
140 conds = (int *)mmap(NULL,
141 nlocks * sizeof (pthread_cond_t),
142 PROT_READ | PROT_WRITE,
143 MAP_ANON | MAP_SHARED,
144 -1, 0L);
145 if (conds == MAP_FAILED) {
146 return (1);
147 }
148
149 (void) pthread_mutexattr_init(&ma);
150 (void) pthread_condattr_init(&ca);
151 if (lm_optP > 1 || opts) {
152 (void) pthread_mutexattr_setpshared(&ma,
153 PTHREAD_PROCESS_SHARED);
154 (void) pthread_condattr_setpshared(&ca,
155 PTHREAD_PROCESS_SHARED);
156 } else {
157 (void) pthread_mutexattr_setpshared(&ma,
158 PTHREAD_PROCESS_PRIVATE);
159 (void) pthread_condattr_setpshared(&ca,
160 PTHREAD_PROCESS_PRIVATE);
161 }
162
163 for (i = 0; i < nlocks; i++) {
164 (void) pthread_mutex_init(&mxs[i], &ma);
165 (void) pthread_cond_init(&cvs[i], &ca);
166 conds[i] = 0;
167 }
168
169 return (e);
170 }
171
172 int
173 block(int index)
174 {
175 (void) pthread_mutex_lock(&mxs[index]);
176 while (conds[index] != 0) {
177 (void) pthread_cond_wait(&cvs[index], &mxs[index]);
178 }
179 conds[index] = 1;
180 (void) pthread_mutex_unlock(&mxs[index]);
181
182 return (0);
183 }
184
185 int
186 unblock(int index)
187 {
188 (void) pthread_mutex_lock(&mxs[index]);
189 conds[index] = 0;
190 if (opto) {
191 (void) pthread_mutex_unlock(&mxs[index]);
192 (void) pthread_cond_signal(&cvs[index]);
193 } else {
194 (void) pthread_cond_signal(&cvs[index]);
195 (void) pthread_mutex_unlock(&mxs[index]);
196 }
197 return (0);
198 }
199
200 /*
201 * API-specific code ENDS here
202 */
203
/*
 * Per-batch setup. On first entry, wire this thread into the ring:
 * compute our global index, assign our own lock pair (us0/us1) and
 * the pair of the next thread (them0/them1). The last thread uses a
 * cross-over wiring so the batten travels cleanly around the ring.
 * Finally, pre-block the neighbor's first lock so the cascade starts
 * in a known state.
 */
int
benchmark_initbatch(void *tsd)
{
	tsd_t			*ts = (tsd_t *)tsd;
	int			e = 0;

	if (ts->ts_once == 0) {
		int		us, them;

#if !defined(__APPLE__)
		/* global thread index from process and thread indices */
		us = (getpindex() * lm_optT) + gettindex();
#else
		/* Apple port derives the index from the tsd slot instead */
		us = gettsdindex(tsd);
#endif /* __APPLE__ */

		/* the next thread in the ring, wrapping at the end */
		them = (us + 1) % (lm_optP * lm_optT);

		ts->ts_id = us;

		/* lock index assignment for us and them */
		ts->ts_us0 = (us * 2);
		ts->ts_us1 = (us * 2) + 1;
		if (us < nthreads - 1) {
			/* straight-thru connection to them */
			ts->ts_them0 = (them * 2);
			ts->ts_them1 = (them * 2) + 1;
		} else {
			/* cross-over connection to them */
			ts->ts_them0 = (them * 2) + 1;
			ts->ts_them1 = (them * 2);
		}

		ts->ts_once = 1;
	}

	/* block their first move */
	e += block(ts->ts_them0);

	return (e);
}
244
/*
 * The timed loop. Each iteration passes the batten around the ring
 * twice (hence i += 2): we let the neighbor re-block us, block their
 * next-plus-one move, unblock their next move, then wait to be
 * unblocked ourselves — first on the 0-locks, then with the lock
 * roles reversed on the 1-locks. The statement order IS the protocol;
 * reordering any pair would deadlock the ring.
 *
 * res->re_count gets the number of hand-offs performed; errors from
 * block()/unblock() accumulate in re_errors.
 */
int
benchmark(void *tsd, result_t *res)
{
	tsd_t			*ts = (tsd_t *)tsd;
	int			i;
	int			e = 0;

	/* wait to be unblocked (id == 0 will not block) */
	e += block(ts->ts_us0);

	for (i = 0; i < lm_optB; i += 2) {
		/* allow them to block us again */
		e += unblock(ts->ts_us0);

		/* block their next + 1 move */
		e += block(ts->ts_them1);

		/* unblock their next move */
		e += unblock(ts->ts_them0);

		/* wait for them to unblock us */
		e += block(ts->ts_us1);

		/* repeat with locks reversed */
		e += unblock(ts->ts_us1);
		e += block(ts->ts_them0);
		e += unblock(ts->ts_them1);
		e += block(ts->ts_us0);
	}

	/* finish batch with nothing blocked */
	e += unblock(ts->ts_them0);
	e += unblock(ts->ts_us0);

	res->re_count = i;
	res->re_errors = e;

	return (0);
}