/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms
 * of the Common Development and Distribution License
 * (the "License"). You may not use this file except
 * in compliance with the License.
 *
 * You can obtain a copy of the license at
 * src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing
 * permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL
 * HEADER in each file and include the License file at
 * usr/src/OPENSOLARIS.LICENSE. If applicable,
 * add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your
 * own identifying information: Portions Copyright [yyyy]
 * [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * The "cascade" test case is a multiprocess/multithread baton-passing model
 * using lock primitives alone for synchronisation. Threads are arranged in a
 * ring. Each thread has two locks of its own on which it blocks, and is able
 * to manipulate the two locks belonging to the thread which follows it in the
 * ring.
 *
 * The number of threads (nthreads) is specified by the generic libMicro -P/-T
 * options. With nthreads == 1 (the default) the uncontended case can be timed.
 *
 * The main logic is generic and allows any simple blocking API to be tested.
 * The API-specific component is clearly indicated.
 */
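
/*
 * For example, with -P 1 -T 3 (nthreads == 3) the ring of lock pairs is:
 *
 *	thread 0: blocks on locks[0]/locks[1], drives locks[2]/locks[3]
 *	thread 1: blocks on locks[2]/locks[3], drives locks[4]/locks[5]
 *	thread 2: blocks on locks[4]/locks[5], drives locks[1]/locks[0]
 *
 * The last connection is crossed over (see benchmark_initbatch) so that
 * thread 0's first lock is left unlocked and the cascade can start.
 */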
44
45 #include <unistd.h>
46 #include <stdlib.h>
47 #include <stdio.h>
48 #include <pthread.h>
49 #include <sys/mman.h>
50
51 #include "libmicro.h"
52
53 typedef struct {
54 int ts_once;
55 int ts_id;
56 int ts_us0; /* our lock indices */
57 int ts_us1;
58 int ts_them0; /* their lock indices */
59 int ts_them1;
60 } tsd_t;
61
62 static int nthreads;
63
64 /*
65 * API-specific code BEGINS here
66 */
67
68 static int opts = 0;
69 static int nlocks;
70 static pthread_mutex_t *locks;
71
72 int
73 benchmark_init()
74 {
75 lm_tsdsize = sizeof (tsd_t);
76
77 (void) sprintf(lm_optstr, "s");
78
79 lm_defN = "cscd_mutex";
80
81 (void) sprintf(lm_usage,
82 " [-s] (force PTHREAD_PROCESS_SHARED)\n"
83 "notes: thread cascade using pthread_mutexes\n");
84
85 return (0);
86 }
87
88 /*ARGSUSED*/
89 int
90 benchmark_optswitch(int opt, char *optarg)
91 {
92 switch (opt) {
93 case 's':
94 opts = 1;
95 break;
96 default:
97 return (-1);
98 }
99 return (0);
100 }
101
102 int
103 benchmark_initrun()
104 {
105 int i;
106 int e = 0;
107 pthread_mutexattr_t ma;
108
109 nthreads = lm_optP * lm_optT;
110 nlocks = nthreads * 2;
111 /*LINTED*/
112 locks = (pthread_mutex_t *)mmap(NULL,
113 nlocks * sizeof (pthread_mutex_t),
114 PROT_READ | PROT_WRITE,
115 MAP_ANON | MAP_SHARED,
116 -1, 0L);
117 if (locks == MAP_FAILED) {
118 return (1);
119 }
120
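	/*
	 * The locks live in the MAP_SHARED region mapped above, so when
	 * more than one process is requested (-P) the mutexes must be
	 * PTHREAD_PROCESS_SHARED; -s forces that even for one process.
	 */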
	(void) pthread_mutexattr_init(&ma);
	if (lm_optP > 1 || opts) {
		(void) pthread_mutexattr_setpshared(&ma,
		    PTHREAD_PROCESS_SHARED);
	} else {
		(void) pthread_mutexattr_setpshared(&ma,
		    PTHREAD_PROCESS_PRIVATE);
	}

	for (i = 0; i < nlocks; i++) {
		(void) pthread_mutex_init(&locks[i], &ma);
	}

	return (e);
}

int
block(int index)
{
	/* pthread_mutex_lock() returns 0 on success or an error number */
	return (pthread_mutex_lock(&locks[index]) != 0);
}

int
unblock(int index)
{
	return (pthread_mutex_unlock(&locks[index]) != 0);
}

/*
 * API-specific code ENDS here
 */

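/*
 * Illustrative sketch only (not part of the original code): because the
 * generic harness calls nothing but block()/unblock(), benchmarking a
 * different blocking API means replacing just the section above. For
 * instance, given a hypothetical shared array "sems" of POSIX semaphores,
 * each initialized to 1 (on platforms where unnamed semaphores work):
 *
 *	int
 *	block(int index)
 *	{
 *		return (sem_wait(&sems[index]) == -1);
 *	}
 *
 *	int
 *	unblock(int index)
 *	{
 *		return (sem_post(&sems[index]) == -1);
 *	}
 */
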
int
benchmark_initbatch(void *tsd)
{
	tsd_t *ts = (tsd_t *)tsd;
	int e = 0;

	if (ts->ts_once == 0) {
		int us, them;

#if !defined(__APPLE__)
		us = (getpindex() * lm_optT) + gettindex();
#else
		us = gettsdindex(tsd);
#endif /* __APPLE__ */

		them = (us + 1) % (lm_optP * lm_optT);

		ts->ts_id = us;

		/* lock index assignment for us and them */
		ts->ts_us0 = (us * 2);
		ts->ts_us1 = (us * 2) + 1;
		if (us < nthreads - 1) {
			/* straight-thru connection to them */
			ts->ts_them0 = (them * 2);
			ts->ts_them1 = (them * 2) + 1;
		} else {
			/* cross-over connection to them */
			ts->ts_them0 = (them * 2) + 1;
			ts->ts_them1 = (them * 2);
		}

		ts->ts_once = 1;
	}

	/* block their first move */
	e += block(ts->ts_them0);

	return (e);
}

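/*
 * Each pass through the loop below completes two baton handoffs, the
 * second with the roles of each lock pair reversed, so i advances by 2
 * and re_count ends up equal to lm_optB (assuming an even batch size).
 */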
int
benchmark(void *tsd, result_t *res)
{
	tsd_t *ts = (tsd_t *)tsd;
	int i;
	int e = 0;

	/* wait to be unblocked (id == 0 will not block) */
	e += block(ts->ts_us0);

	for (i = 0; i < lm_optB; i += 2) {
		/* allow them to block us again */
		e += unblock(ts->ts_us0);

		/* block their next + 1 move */
		e += block(ts->ts_them1);

		/* unblock their next move */
		e += unblock(ts->ts_them0);

		/* wait for them to unblock us */
		e += block(ts->ts_us1);

		/* repeat with locks reversed */
		e += unblock(ts->ts_us1);
		e += block(ts->ts_them0);
		e += unblock(ts->ts_them1);
		e += block(ts->ts_us0);
	}

	/* finish batch with nothing blocked */
	e += unblock(ts->ts_them0);
	e += unblock(ts->ts_us0);

	res->re_count = i;
	res->re_errors = e;

	return (0);
}