/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms
 * of the Common Development and Distribution License
 * (the "License").  You may not use this file except
 * in compliance with the License.
 *
 * You can obtain a copy of the license at
 * src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing
 * permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL
 * HEADER in each file and include the License file at
 * usr/src/OPENSOLARIS.LICENSE.  If applicable,
 * add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your
 * own identifying information: Portions Copyright [yyyy]
 * [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
32 * The "cascade" test case is a multiprocess/multithread batten-passing model
33 * using lock primitives alone for synchronisation. Threads are arranged in a
34 * ring. Each thread has two locks of its own on which it blocks, and is able
35 * to manipulate the two locks belonging to the thread which follows it in the
38 * The number of threads (nthreads) is specified by the generic libMicro -P/-T
39 * options. With nthreads == 1 (the default) the uncontended case can be timed.
41 * The main logic is generic and allows any simple blocking API to be tested.
42 * The API-specific component is clearly indicated.
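
/*
 * Headers assumed by the code below: pthread mutexes/condvars, mmap(),
 * sprintf(), and the libMicro harness declarations (lm_opt*, result_t).
 */
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <pthread.h>
#include <sys/mman.h>

#include "libmicro.h"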
typedef struct {
	int	ts_once;
	int	ts_us0;		/* our lock indices */
	int	ts_us1;
	int	ts_them0;	/* their lock indices */
	int	ts_them1;
} tsd_t;

static int	nthreads;
/*
 * API-specific code BEGINS here
 */

static int	opto = 0;	/* -o: signal outside the mutex */
static int	opts = 0;	/* -s: force PTHREAD_PROCESS_SHARED */
static int	nlocks;

static pthread_mutex_t	*mxs;
static pthread_cond_t	*cvs;
static int	*conds;
int
benchmark_init()
{
	lm_tsdsize = sizeof (tsd_t);

	(void) sprintf(lm_optstr, "os");

	lm_defN = "cscd_cond";

	(void) sprintf(lm_usage,
	    "       [-o] (do signal outside mutex)\n"
	    "       [-s] (force PTHREAD_PROCESS_SHARED)\n"
	    "notes: thread cascade using pthread_conds\n");

	return (0);
}
/*ARGSUSED*/
int
benchmark_optswitch(int opt, char *optarg)
{
	switch (opt) {
	case 'o':
		opto = 1;
		break;
	case 's':
		opts = 1;
		break;
	default:
		return (-1);
	}

	return (0);
}
int
benchmark_initrun()
{
	int			i;
	pthread_mutexattr_t	ma;
	pthread_condattr_t	ca;

	nthreads = lm_optP * lm_optT;
	nlocks = nthreads * 2;
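
	/*
	 * The mutexes, condvars and condition flags live in anonymous
	 * MAP_SHARED memory so that the multiprocess case (-P > 1) can
	 * operate on them across processes; the matching
	 * PTHREAD_PROCESS_SHARED attributes are set further below.
	 */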
	mxs = (pthread_mutex_t *)mmap(NULL,
	    nlocks * sizeof (pthread_mutex_t),
	    PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED,
	    -1, 0L);
	if (mxs == MAP_FAILED) {
		return (1);
	}
	cvs = (pthread_cond_t *)mmap(NULL,
	    nlocks * sizeof (pthread_cond_t),
	    PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED,
	    -1, 0L);
	if (cvs == MAP_FAILED) {
		return (1);
	}
	/*
	 * Condition flags (sized as pthread_cond_t, which over-allocates
	 * for an int array but is harmless).
	 */
	conds = (int *)mmap(NULL,
	    nlocks * sizeof (pthread_cond_t),
	    PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED,
	    -1, 0L);
	if (conds == MAP_FAILED) {
		return (1);
	}
	(void) pthread_mutexattr_init(&ma);
	(void) pthread_condattr_init(&ca);
	if (lm_optP > 1 || opts) {
		(void) pthread_mutexattr_setpshared(&ma,
		    PTHREAD_PROCESS_SHARED);
		(void) pthread_condattr_setpshared(&ca,
		    PTHREAD_PROCESS_SHARED);
	} else {
		(void) pthread_mutexattr_setpshared(&ma,
		    PTHREAD_PROCESS_PRIVATE);
		(void) pthread_condattr_setpshared(&ca,
		    PTHREAD_PROCESS_PRIVATE);
	}
	for (i = 0; i < nlocks; i++) {
		(void) pthread_mutex_init(&mxs[i], &ma);
		(void) pthread_cond_init(&cvs[i], &ca);
		conds[i] = 0;
	}

	return (0);
}
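
/*
 * block() parks the caller until lock slot "index" is clear, then claims it;
 * unblock() clears the slot and signals any waiter.  With -o the signal is
 * issued after the mutex has been dropped, otherwise while it is still held.
 */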
int
block(int index)
{
	(void) pthread_mutex_lock(&mxs[index]);
	while (conds[index] != 0) {
		(void) pthread_cond_wait(&cvs[index], &mxs[index]);
	}
	conds[index] = 1;
	(void) pthread_mutex_unlock(&mxs[index]);

	return (0);
}
int
unblock(int index)
{
	(void) pthread_mutex_lock(&mxs[index]);
	conds[index] = 0;
	if (opto) {
		/* signal outside the mutex */
		(void) pthread_mutex_unlock(&mxs[index]);
		(void) pthread_cond_signal(&cvs[index]);
	} else {
		/* signal while still holding the mutex */
		(void) pthread_cond_signal(&cvs[index]);
		(void) pthread_mutex_unlock(&mxs[index]);
	}

	return (0);
}
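
/*
 * Rationale for -o: signalling after the unlock avoids waking a waiter that
 * would immediately contend for a mutex the signaller still holds, while the
 * default signals under the mutex; the benchmark lets the two orderings be
 * compared.
 */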
/*
 * API-specific code ENDS here
 */
int
benchmark_initbatch(void *tsd)
{
	tsd_t	*ts = (tsd_t *)tsd;
	int	e = 0;

	if (ts->ts_once == 0) {
		int	us, them;

#if !defined(__APPLE__)
		us = (getpindex() * lm_optT) + gettindex();
#else
		us = gettsdindex(tsd);
#endif /* __APPLE__ */

		them = (us + 1) % (lm_optP * lm_optT);
		/* lock index assignment for us and them */
		ts->ts_us0 = (us * 2);
		ts->ts_us1 = (us * 2) + 1;
		if (us < nthreads - 1) {
			/* straight-thru connection to them */
			ts->ts_them0 = (them * 2);
			ts->ts_them1 = (them * 2) + 1;
		} else {
			/* cross-over connection to them */
			ts->ts_them0 = (them * 2) + 1;
			ts->ts_them1 = (them * 2);
		}

		ts->ts_once = 1;
	}
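
	/*
	 * Illustration of the assignment above: with nthreads == 3,
	 * thread 0 owns lock slots 0/1, thread 1 owns 2/3 and thread 2
	 * owns 4/5.  Threads 0 and 1 use straight-thru connections
	 * (them0/them1 = 2/3 and 4/5 respectively); thread 2, the last
	 * in the ring, crosses over to thread 0's slots as 1/0.
	 */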
	/* block their first move */
	e += block(ts->ts_them0);

	return (e);
}
int
benchmark(void *tsd, result_t *res)
{
	tsd_t	*ts = (tsd_t *)tsd;
	int	i;
	int	e = 0;
	/* wait to be unblocked (id == 0 will not block) */
	e += block(ts->ts_us0);
	for (i = 0; i < lm_optB; i += 2) {
		/* allow them to block us again */
		e += unblock(ts->ts_us0);

		/* block their next + 1 move */
		e += block(ts->ts_them1);

		/* unblock their next move */
		e += unblock(ts->ts_them0);

		/* wait for them to unblock us */
		e += block(ts->ts_us1);

		/* repeat with locks reversed */
		e += unblock(ts->ts_us1);
		e += block(ts->ts_them0);
		e += unblock(ts->ts_them1);
		e += block(ts->ts_us0);
	}
	/* finish batch with nothing blocked */
	e += unblock(ts->ts_them0);
	e += unblock(ts->ts_us0);

	res->re_count = i;
	res->re_errors = e;

	return (0);
}