/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __FIREHOSE_INLINE_INTERNAL__
#define __FIREHOSE_INLINE_INTERNAL__

#ifndef _os_atomic_basetypeof
#define _os_atomic_basetypeof(p) \
		typeof(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed))
#endif

// atomically raise field `f` of `*p` to at least `v`, leaving the previous
// value in `*o`; gives up without storing when the field is already >= v
#define firehose_atomic_maxv2o(p, f, v, o, m) \
	os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \
		if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \
	})

#define firehose_atomic_max2o(p, f, v, m) ({ \
	_os_atomic_basetypeof(&(p)->f) _old; \
	firehose_atomic_maxv2o(p, f, v, &_old, m); \
})

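/*
 * Usage sketch (illustrative, not part of the original header), assuming a
 * hypothetical structure with an atomic high-watermark field:
 *
 *	struct stats { _Atomic(uint64_t) st_max; } *st;
 *	firehose_atomic_max2o(st, st_max, new_value, relaxed);
 *
 * The loop gives up without storing when st_max is already >= new_value.
 */
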
#ifndef KERNEL
// caller must test for non-zero first
OS_ALWAYS_INLINE
static inline uint16_t
firehose_bitmap_first_set(uint64_t bitmap)
{
	dispatch_assert(bitmap != 0);
	// this builtin returns 0 if bitmap is 0, or (first bit set + 1)
	return (uint16_t)__builtin_ffsll((long long)bitmap) - 1;
}
#endif
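
/*
 * Usage sketch (illustrative): claiming the lowest set bit of a free-slot
 * bitmap; `bitmap` is a hypothetical value and must be tested for zero first:
 *
 *	if (bitmap) {
 *		uint16_t slot = firehose_bitmap_first_set(bitmap);
 *		bitmap &= ~(1ULL << slot); // mark the slot as taken
 *	}
 */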

#pragma mark -
#pragma mark Mach Misc.
#ifndef KERNEL

OS_ALWAYS_INLINE
static inline mach_port_t
firehose_mach_port_allocate(uint32_t flags, void *ctx)
{
	mach_port_t port = MACH_PORT_NULL;
	mach_port_options_t opts = {
		.flags = flags,
	};
	kern_return_t kr = mach_port_construct(mach_task_self(), &opts,
			(mach_port_context_t)ctx, &port);
	if (unlikely(kr)) {
		DISPATCH_VERIFY_MIG(kr);
		DISPATCH_CLIENT_CRASH(kr, "Unable to allocate mach port");
	}
	return port;
}

OS_ALWAYS_INLINE
static inline kern_return_t
firehose_mach_port_recv_dispose(mach_port_t port, void *ctx)
{
	kern_return_t kr;
	kr = mach_port_destruct(mach_task_self(), port, 0,
			(mach_port_context_t)ctx);
	DISPATCH_VERIFY_MIG(kr);
	return kr;
}

OS_ALWAYS_INLINE
static inline void
firehose_mach_port_send_release(mach_port_t port)
{
	kern_return_t kr = mach_port_deallocate(mach_task_self(), port);
	DISPATCH_VERIFY_MIG(kr);
	dispatch_assume_zero(kr);
}

OS_ALWAYS_INLINE
static inline void
firehose_mach_port_guard(mach_port_t port, bool strict, void *ctx)
{
	kern_return_t kr = mach_port_guard(mach_task_self(), port,
			(mach_port_context_t)ctx, strict);
	DISPATCH_VERIFY_MIG(kr);
	dispatch_assume_zero(kr);
}
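
/*
 * Usage sketch (illustrative): a typical lifecycle for a guarded receive
 * right managed with the helpers above; `owner` is a hypothetical context
 * pointer used as the port's guard value:
 *
 *	mach_port_t port = firehose_mach_port_allocate(
 *			MPO_CONTEXT_AS_GUARD | MPO_INSERT_SEND_RIGHT, owner);
 *	// ... exchange messages on the port ...
 *	firehose_mach_port_send_release(port);
 *	(void)firehose_mach_port_recv_dispose(port, owner);
 */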

OS_ALWAYS_INLINE
static inline void
firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz,
		mach_msg_header_t *hdr)
{
	mig_reply_error_t *msg_reply = (mig_reply_error_t *)alloca(maxmsgsz);
	kern_return_t rc = KERN_SUCCESS;
	bool expects_reply = false;

	if (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
		expects_reply = true;
	}

	if (!fastpath(demux(hdr, &msg_reply->Head))) {
		rc = MIG_BAD_ID;
	} else if (msg_reply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		rc = KERN_SUCCESS;
	} else {
		// if MACH_MSGH_BITS_COMPLEX is _not_ set, then msg_reply->RetCode
		// is present
		rc = msg_reply->RetCode;
	}

	if (slowpath(rc == KERN_SUCCESS && expects_reply)) {
		// if crashing here, some handler returned KERN_SUCCESS
		// hoping for firehose_mig_server to perform the mach_msg()
		// call to reply, and it doesn't know how to do that
		DISPATCH_INTERNAL_CRASH(msg_reply->Head.msgh_id,
				"firehose_mig_server doesn't handle replies");
	}
	if (slowpath(rc != KERN_SUCCESS && rc != MIG_NO_REPLY)) {
		// destroy the request - but not the reply port
		hdr->msgh_remote_port = 0;
		mach_msg_destroy(hdr);
	}
}
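
/*
 * Usage sketch (illustrative): dispatching a received message through a
 * MIG-generated demux routine; `server_demux` and `reply_union` are
 * hypothetical names standing in for MIG-generated symbols:
 *
 *	static void
 *	handle_request(mach_msg_header_t *hdr)
 *	{
 *		firehose_mig_server(server_demux, sizeof(union reply_union), hdr);
 *	}
 */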

#endif // !KERNEL
#pragma mark -
#pragma mark firehose buffer

OS_ALWAYS_INLINE
static inline firehose_chunk_t
firehose_buffer_chunk_for_address(void *addr)
{
	uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_CHUNK_SIZE - 1);
	return (firehose_chunk_t)chunk_addr;
}

OS_ALWAYS_INLINE
static inline uint16_t
firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_chunk_t fbc)
{
	return (uint16_t)(fbc - fb->fb_chunks);
}

OS_ALWAYS_INLINE
static inline firehose_chunk_t
firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref)
{
	return fb->fb_chunks + ref;
}
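
/*
 * Usage sketch (illustrative): refs and chunk pointers round-trip through
 * the helpers above, and any address inside a chunk maps back to its chunk
 * header because chunks are FIREHOSE_CHUNK_SIZE-aligned:
 *
 *	firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref);
 *	dispatch_assert(firehose_buffer_chunk_to_ref(fb, fc) == ref);
 *	dispatch_assert(firehose_buffer_chunk_for_address(
 *			(uint8_t *)fc + 16) == fc);
 */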

#ifndef FIREHOSE_SERVER
#if DISPATCH_PURE_C

OS_ALWAYS_INLINE
static inline uint8_t
firehose_buffer_qos_bits_propagate(void)
{
#ifndef KERNEL
	pthread_priority_t pp = _dispatch_priority_propagate();

	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	return (uint8_t)(pp >> _PTHREAD_PRIORITY_QOS_CLASS_SHIFT);
#else
	return 0;
#endif
}

OS_ALWAYS_INLINE
static inline void
firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream)
{
	firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream];
	firehose_stream_state_u old_state, new_state;
	firehose_chunk_t fc;
	uint64_t stamp = UINT64_MAX; // will cause the reservation to fail
	uint16_t ref;
	long result;

	old_state.fss_atomic_state =
			os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed);
	ref = old_state.fss_current;
	if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) {
		// there is no installed page, nothing to flush, go away
#ifndef KERNEL
		firehose_buffer_force_connect(fb);
#endif
		return;
	}

	fc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current);
	result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream,
			firehose_buffer_qos_bits_propagate(), 1, 0, NULL);
	if (likely(result < 0)) {
		firehose_buffer_ring_enqueue(fb, old_state.fss_current);
	}
	if (unlikely(result > 0)) {
		// because we pass a silly stamp that requires a flush
		DISPATCH_INTERNAL_CRASH(result, "Allocation should always fail");
	}

	// as a best effort, try to uninstall the page we just flushed;
	// failing is okay, let's not contend stupidly for something
	// allocators know how to handle in the first place
	new_state = old_state;
	new_state.fss_current = 0;
	(void)os_atomic_cmpxchg2o(fbs, fbs_state.fss_atomic_state,
			old_state.fss_atomic_state, new_state.fss_atomic_state, relaxed);
}

/**
 * @function firehose_buffer_tracepoint_reserve
 *
 * @abstract
 * Reserves space in the firehose buffer for a tracepoint with the specified
 * characteristics.
 *
 * @discussion
 * This returns a slot with the length of the tracepoint already set, so
 * that in case of a crash we maximize our chance of being able to skip
 * the tracepoint after a partial write.
 *
 * Once the tracepoint has been written, firehose_buffer_tracepoint_flush()
 * must be called.
 *
 * @param fb
 * The buffer to allocate from.
 *
 * @param stamp
 * The timestamp for this tracepoint.
 *
 * @param stream
 * The buffer stream to use.
 *
 * @param pubsize
 * The size of the public data for this tracepoint; cannot be 0 and does
 * not include the size of the tracepoint header.
 *
 * @param privsize
 * The size of the private data for this tracepoint; can be 0.
 *
 * @param privptr
 * The pointer to the private buffer; can be NULL.
 *
 * @result
 * The pointer to the tracepoint.
 */
OS_ALWAYS_INLINE
static inline firehose_tracepoint_t
firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp,
		firehose_stream_t stream, uint16_t pubsize,
		uint16_t privsize, uint8_t **privptr)
{
	firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream];
	firehose_stream_state_u old_state, new_state;
	firehose_chunk_t fc;
#if KERNEL
	bool failable = false;
#endif
	bool success;
	long result;
	uint16_t ref;

	// cannot use os_atomic_rmw_loop2o, _page_try_reserve does a store
	old_state.fss_atomic_state =
			os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed);
	for (;;) {
		new_state = old_state;

		ref = old_state.fss_current;
		if (likely(ref && ref != FIREHOSE_STREAM_STATE_PRISTINE)) {
			fc = firehose_buffer_ref_to_chunk(fb, ref);
			result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream,
					firehose_buffer_qos_bits_propagate(),
					pubsize, privsize, privptr);
			if (likely(result > 0)) {
				uint64_t thread;
#ifdef KERNEL
				thread = thread_tid(current_thread());
#else
				thread = _pthread_threadid_self_np_direct();
#endif
				return firehose_chunk_tracepoint_begin(fc,
						stamp, pubsize, thread, result);
			}
			if (likely(result < 0)) {
				firehose_buffer_ring_enqueue(fb, old_state.fss_current);
			}
			new_state.fss_current = 0;
		}
#if KERNEL
		if (failable) {
			return NULL;
		}
#endif

		if (unlikely(old_state.fss_allocator)) {
			_dispatch_gate_wait(&fbs->fbs_state.fss_gate,
					DLOCK_LOCK_DATA_CONTENTION);
			old_state.fss_atomic_state =
					os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed);
#if KERNEL
			failable = true;
#endif
			continue;
		}

		// if the thread doing the allocation is a low priority one,
		// we may starve high priority ones, so disable preemption
		// before we become an allocator; preemption is reenabled in
		// firehose_buffer_stream_chunk_install
		__firehose_critical_region_enter();
#if KERNEL
		new_state.fss_allocator = (uint32_t)cpu_number();
#else
		new_state.fss_allocator = _dispatch_lock_value_for_self();
#endif
		success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state,
				old_state.fss_atomic_state, new_state.fss_atomic_state,
				&old_state.fss_atomic_state, relaxed);
		if (likely(success)) {
			break;
		}
		__firehose_critical_region_leave();
	}

	struct firehose_tracepoint_query_s ask = {
		.pubsize = pubsize,
		.privsize = privsize,
		.stream = stream,
		.for_io = (firehose_stream_uses_io_bank & (1UL << stream)) != 0,
#ifndef KERNEL
		.quarantined = fb->fb_header.fbh_quarantined,
#endif
		.stamp = stamp,
	};
	return firehose_buffer_tracepoint_reserve_slow(fb, &ask, privptr);
}
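
/*
 * Usage sketch (illustrative): a typical logging path pairs reserve with
 * flush; `payload`, `pubsize`, `stamp` and `ftid` are hypothetical values
 * computed by the caller, and firehose_stream_persist is used as an
 * example stream:
 *
 *	firehose_tracepoint_t ft = firehose_buffer_tracepoint_reserve(fb,
 *			stamp, firehose_stream_persist, pubsize, 0, NULL);
 *	if (ft) {
 *		memcpy(ft->ft_data, payload, pubsize);
 *		firehose_buffer_tracepoint_flush(fb, ft, ftid);
 *	}
 */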

/**
 * @function firehose_buffer_tracepoint_flush
 *
 * @abstract
 * Flushes a firehose tracepoint, and sends the chunk to the daemon when it
 * is full and this was the last tracepoint writer for this chunk.
 *
 * @param fb
 * The buffer the tracepoint belongs to.
 *
 * @param ft
 * The tracepoint to flush.
 *
 * @param ftid
 * The firehose tracepoint ID for that tracepoint.
 * It is written last, preventing compiler reordering, so that its absence
 * on crash recovery means the tracepoint is partial.
 */
OS_ALWAYS_INLINE
static inline void
firehose_buffer_tracepoint_flush(firehose_buffer_t fb,
		firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
{
	firehose_chunk_t fc = firehose_buffer_chunk_for_address(ft);

	// Needed for process death handling (tracepoint-flush):
	// We want to make sure the observers will see memory effects
	// in program (asm) order:
	// 1. write all the data to the tracepoint
	// 2. write the tracepoint ID, so that seeing it means the tracepoint
	//    is valid
	if (firehose_chunk_tracepoint_end(fc, ft, ftid)) {
		firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fc));
	}
}

#ifndef KERNEL
OS_ALWAYS_INLINE
static inline void
firehose_buffer_clear_bank_flags(firehose_buffer_t fb, unsigned long bits)
{
	firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank;
	unsigned long orig_flags;

	orig_flags = os_atomic_and_orig2o(fbb, fbb_flags, ~bits, relaxed);
	if (orig_flags != (orig_flags & ~bits)) {
		// at least one of the cleared bits was previously set
		firehose_buffer_update_limits(fb);
	}
}

OS_ALWAYS_INLINE
static inline void
firehose_buffer_set_bank_flags(firehose_buffer_t fb, unsigned long bits)
{
	firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank;
	unsigned long orig_flags;

	orig_flags = os_atomic_or_orig2o(fbb, fbb_flags, bits, relaxed);
	if (orig_flags != (orig_flags | bits)) {
		// at least one of the set bits was previously clear
		firehose_buffer_update_limits(fb);
	}
}
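
/*
 * Usage sketch (illustrative): the helpers only recompute the bank limits
 * when the flag word actually changed; FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY
 * is assumed here to be one of the defined bank flag bits:
 *
 *	firehose_buffer_set_bank_flags(fb, FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
 *	// ... memory pressure resolves ...
 *	firehose_buffer_clear_bank_flags(fb, FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
 */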
#endif // !KERNEL

#endif // !defined(FIREHOSE_SERVER)

#endif // DISPATCH_PURE_C

#endif // __FIREHOSE_INLINE_INTERNAL__