/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"

#undef dispatch_once
#undef dispatch_once_f

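// Stack-allocated record that each losing caller pushes onto the predicate's
// waiter chain: dow_next links to the previously queued waiter (the chain
// ends at the winner's own record) and dow_sema is the semaphore the winner
// signals once the initializer has returned.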
struct _dispatch_once_waiter_s {
	volatile struct _dispatch_once_waiter_s *volatile dow_next;
	_dispatch_thread_semaphore_t dow_sema;
};
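
// Sentinel stored into the predicate once initialization has completed; the
// all-ones value is what the dispatch_once() fast-path check tests against.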
#define DISPATCH_ONCE_DONE ((struct _dispatch_once_waiter_s *)~0l)

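// Block-based entry point: forwards to the function-based variant, passing
// the block as the context and its invoke function as the callback.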
void
dispatch_once(dispatch_once_t *val, dispatch_block_t block)
{
	dispatch_once_f(val, block, _dispatch_Block_invoke(block));
}
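
/*
 * Minimal usage sketch (illustrative only; `pred`, `shared`, and
 * make_shared() are hypothetical names, not part of this file):
 *
 *	static dispatch_once_t pred;
 *	static void *shared;
 *
 *	void *get_shared(void) {
 *		dispatch_once(&pred, ^{ shared = make_shared(); });
 *		return shared;
 *	}
 */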

void
dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
	struct _dispatch_once_waiter_s * volatile *vval =
			(struct _dispatch_once_waiter_s **)val;
	struct _dispatch_once_waiter_s dow = { NULL, 0 };
	struct _dispatch_once_waiter_s *tail, *tmp;
	_dispatch_thread_semaphore_t sema;
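
	// Exactly one caller wins the race to swap the predicate from NULL to
	// its own waiter record and runs the initializer; every other caller
	// takes the else branch below and queues itself behind the winner.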
	if (dispatch_atomic_cmpxchg(vval, NULL, &dow, acquire)) {
		_dispatch_client_callout(ctxt, func);

		// The next barrier must be long and strong.
		//
		// The scenario: SMP systems with weakly ordered memory models
		// and aggressive out-of-order instruction execution.
		//
		// The problem:
		//
		// The dispatch_once*() wrapper macro causes the callee's
		// instruction stream to look like this (pseudo-RISC):
		//
		//	load r5, pred-addr
		//	cmpi r5, -1
		//	beq  1f
		//	call dispatch_once*()
		//	1f:
		//	load r6, data-addr
		//
		// May be re-ordered like so:
		//
		//	load r6, data-addr
		//	load r5, pred-addr
		//	cmpi r5, -1
		//	beq  1f
		//	call dispatch_once*()
		//	1f:
		//
		// Normally, a barrier on the read side is used to workaround
		// the weakly ordered memory model. But barriers are expensive
		// and we only need to synchronize once! After func(ctxt)
		// completes, the predicate will be marked as "done" and the
		// branch predictor will correctly skip the call to
		// dispatch_once*().
		//
		// A far faster alternative solution: Defeat the speculative
		// read-ahead of peer CPUs.
		//
		// Modern architectures will throw away speculative results
		// once a branch mis-prediction occurs. Therefore, if we can
		// ensure that the predicate is not marked as being complete
		// until long after the last store by func(ctxt), then we have
		// defeated the read-ahead of peer CPUs.
		//
		// In other words, the last "store" by func(ctxt) must complete
		// and then N cycles must elapse before ~0l is stored to *val.
		// The value of N is whatever is sufficient to defeat the
		// read-ahead mechanism of peer CPUs.
		//
		// On some CPUs, the most fully synchronizing instruction might
		// need to be issued.
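		//
		// For contrast, the conventional read-side fix would put an
		// acquire load on every fast-path check, roughly (sketch in
		// portable C11 terms; pred, DONE_VALUE and slow_path() are
		// placeholders, not the code used in this file):
		//
		//	if (atomic_load_explicit(&pred, memory_order_acquire)
		//			!= DONE_VALUE) {
		//		slow_path();
		//	}
		//
		// That taxes every call after initialization, whereas the
		// maximally synchronizing barrier below is paid only once, by
		// the thread that ran func(ctxt).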

		dispatch_atomic_maximally_synchronizing_barrier();
		// above assumed to contain release barrier
		tmp = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed);
		tail = &dow;
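		// Wake every waiter that queued while func(ctxt) ran: walk the
		// chain from the old head back to our own record, spinning
		// briefly if a waiter has won its cmpxchg but not yet published
		// its dow_next link.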
		while (tail != tmp) {
			while (!tmp->dow_next) {
				dispatch_hardware_pause();
			}
			sema = tmp->dow_sema;
			tmp = (struct _dispatch_once_waiter_s *)tmp->dow_next;
			_dispatch_thread_semaphore_signal(sema);
		}
	} else {
		// Losing path: queue this frame's waiter record on the
		// predicate and block until the winner signals it.
		dow.dow_sema = _dispatch_get_thread_semaphore();
		tmp = *vval;
		for (;;) {
			if (tmp == DISPATCH_ONCE_DONE) {
				// Initialization finished before we could queue;
				// nothing to wait for.
				break;
			}
			if (dispatch_atomic_cmpxchgvw(vval, tmp, &dow, &tmp,
					release)) {
				dow.dow_next = tmp;
				_dispatch_thread_semaphore_wait(dow.dow_sema);
				break;
			}
		}
		_dispatch_put_thread_semaphore(dow.dow_sema);
	}
}