/* apple/xnu: tools/tests/libMicro/cascade_mutex.c */
/*
 * The contents of this file are subject to the terms
 * of the Common Development and Distribution License
 * (the "License").  You may not use this file except
 * in compliance with the License.
 *
 * You can obtain a copy of the license at
 * src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing
 * permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL
 * HEADER in each file and include the License file at
 * usr/src/OPENSOLARIS.LICENSE.  If applicable,
 * add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your
 * own identifying information: Portions Copyright [yyyy]
 * [name of copyright owner]
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * The "cascade" test case is a multiprocess/multithread batten-passing model
 * using lock primitives alone for synchronisation. Threads are arranged in a
 * ring. Each thread has two locks of its own on which it blocks, and is able
 * to manipulate the two locks belonging to the thread which follows it in the
 * ring.
 *
 * The number of threads (nthreads) is specified by the generic libMicro -P/-T
 * options. With nthreads == 1 (the default) the uncontended case can be timed.
 *
 * The main logic is generic and allows any simple blocking API to be tested.
 * The API-specific component is clearly indicated.
 */
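/*
 * As a worked illustration (indices assumed, following the assignment made
 * in benchmark_initbatch() below): with nthreads == 3 the lock array holds
 * 6 mutexes and each thread is wired to its neighbour roughly like so:
 *
 *	thread 0: us0 = 0, us1 = 1, them0 = 2, them1 = 3   (straight-thru)
 *	thread 1: us0 = 2, us1 = 3, them0 = 4, them1 = 5   (straight-thru)
 *	thread 2: us0 = 4, us1 = 5, them0 = 1, them1 = 0   (cross-over)
 *
 * Each thread blocks on its own pair of locks and manipulates the pair
 * belonging to the thread that follows it, so the baton travels around
 * the ring.
 */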
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <pthread.h>
#include <sys/mman.h>

#include "libmicro.h"

typedef struct {
	int			ts_once;
	int			ts_us0;		/* our lock indices */
	int			ts_us1;
	int			ts_them0;	/* their lock indices */
	int			ts_them1;
} tsd_t;

static int			nthreads;

/*
 * API-specific code BEGINS here
 */

static int			opts = 0;
static int			nlocks;
static pthread_mutex_t		*locks;
int
benchmark_init()
{
	lm_tsdsize = sizeof (tsd_t);

	(void) sprintf(lm_optstr, "s");

	lm_defN = "cscd_mutex";

	(void) sprintf(lm_usage,
	    "       [-s] (force PTHREAD_PROCESS_SHARED)\n"
	    "notes: thread cascade using pthread_mutexes\n");

	return (0);
}
int
benchmark_optswitch(int opt, char *optarg)
{
	switch (opt) {
	case 's':
		opts = 1;
		break;
	default:
		return (-1);
	}

	return (0);
}
int
benchmark_initrun()
{
	int			i;
	pthread_mutexattr_t	ma;

	nthreads = lm_optP * lm_optT;
	nlocks = nthreads * 2;
	locks = (pthread_mutex_t *)mmap(NULL,
	    nlocks * sizeof (pthread_mutex_t),
	    PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED,
	    -1, 0L);
	if (locks == MAP_FAILED) {
		return (1);
	}

	(void) pthread_mutexattr_init(&ma);
	if (lm_optP > 1 || opts) {
		(void) pthread_mutexattr_setpshared(&ma,
		    PTHREAD_PROCESS_SHARED);
	} else {
		(void) pthread_mutexattr_setpshared(&ma,
		    PTHREAD_PROCESS_PRIVATE);
	}
	for (i = 0; i < nlocks; i++) {
		(void) pthread_mutex_init(&locks[i], &ma);
	}

	return (0);
}
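/*
 * The locks live in a MAP_ANON | MAP_SHARED mapping so that, when the
 * benchmark is run with -P greater than 1, the mutexes are visible to
 * every worker process; PTHREAD_PROCESS_SHARED is selected (or forced
 * with -s) for the same reason.  Otherwise PTHREAD_PROCESS_PRIVATE is
 * used for the single-process case.
 */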
int
block(int index)
{
	return (pthread_mutex_lock(&locks[index]) == -1);
}

int
unblock(int index)
{
	return (pthread_mutex_unlock(&locks[index]) == -1);
}
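/*
 * Note: pthread_mutex_lock() and pthread_mutex_unlock() return 0 on
 * success and an error number (not -1) on failure, so the comparisons
 * above never report an error.  A variant that actually counted
 * failures could compare against 0 instead, for example:
 *
 *	return (pthread_mutex_lock(&locks[index]) != 0);
 */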
/*
 * API-specific code ENDS here
 */
int
benchmark_initbatch(void *tsd)
{
	tsd_t			*ts = (tsd_t *)tsd;
	int			e = 0;

	if (ts->ts_once == 0) {
		int		us, them;

#if !defined(__APPLE__)
		us = (getpindex() * lm_optT) + gettindex();
#else
		us = gettsdindex(tsd);
#endif /* __APPLE__ */

		them = (us + 1) % (lm_optP * lm_optT);

		/* lock index assignment for us and them */
		ts->ts_us0 = (us * 2);
		ts->ts_us1 = (us * 2) + 1;
		if (us < nthreads - 1) {
			/* straight-thru connection to them */
			ts->ts_them0 = (them * 2);
			ts->ts_them1 = (them * 2) + 1;
		} else {
			/* cross-over connection to them */
			ts->ts_them0 = (them * 2) + 1;
			ts->ts_them1 = (them * 2);
		}

		ts->ts_once = 1;
	}

	/* block their first move */
	e += block(ts->ts_them0);

	return (e);
}
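/*
 * Each iteration of the loop in benchmark() below performs two complete
 * hand-offs, one using the first lock of each pair and one using the
 * second, which is why the loop counter advances by 2 per pass.
 */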
int
benchmark(void *tsd, result_t *res)
{
	tsd_t			*ts = (tsd_t *)tsd;
	int			i;
	int			e = 0;

	/* wait to be unblocked (id == 0 will not block) */
	e += block(ts->ts_us0);

	for (i = 0; i < lm_optB; i += 2) {
		/* allow them to block us again */
		e += unblock(ts->ts_us0);

		/* block their next + 1 move */
		e += block(ts->ts_them1);

		/* unblock their next move */
		e += unblock(ts->ts_them0);

		/* wait for them to unblock us */
		e += block(ts->ts_us1);

		/* repeat with locks reversed */
		e += unblock(ts->ts_us1);
		e += block(ts->ts_them0);
		e += unblock(ts->ts_them1);
		e += block(ts->ts_us0);
	}

	/* finish batch with nothing blocked */
	e += unblock(ts->ts_them0);
	e += unblock(ts->ts_us0);

	res->re_count = i;
	res->re_errors = e;

	return (0);
}
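/*
 * A typical invocation, assuming the file has been built into the usual
 * libMicro cascade_mutex binary (binary name and option handling per the
 * generic libMicro framework), might look like:
 *
 *	./cascade_mutex -P 2 -T 4 -s
 *
 * i.e. a cascade over 2 processes x 4 threads each, with
 * PTHREAD_PROCESS_SHARED mutexes forced on via the -s flag declared above.
 */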