src/firehose/firehose_inline_internal.h (libdispatch-913.1.6)
/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __FIREHOSE_INLINE_INTERNAL__
#define __FIREHOSE_INLINE_INTERNAL__

#define firehose_atomic_maxv2o(p, f, v, o, m) \
		os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \
			if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \
		})

#define firehose_atomic_max2o(p, f, v, m) ({ \
		typeof((p)->f) _old; \
		firehose_atomic_maxv2o(p, f, v, &_old, m); \
	})
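
/*
 * Illustrative sketch (not part of the original header; the struct and
 * variable names below are hypothetical): these macros atomically raise a
 * struct field to at least `v`, bailing out of the RMW loop early when the
 * current value is already >= v.
 *
 *	struct stats { uint64_t max_stamp; } s = { .max_stamp = 10 };
 *	uint64_t prev;
 *	firehose_atomic_maxv2o(&s, max_stamp, 42, &prev, relaxed);
 *	// prev == 10, s.max_stamp == 42; a second call with v == 7 would
 *	// give up immediately and leave max_stamp at 42.
 */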

#ifndef KERNEL
// caller must test for non-zero first
OS_ALWAYS_INLINE
static inline uint16_t
firehose_bitmap_first_set(uint64_t bitmap)
{
	dispatch_assert(bitmap != 0);
	// this builtin returns 0 if bitmap is 0, or (first bit set + 1)
	return (uint16_t)__builtin_ffsll((long long)bitmap) - 1;
}
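
/*
 * Example (illustrative): for bitmap == 0x28 (0b101000), __builtin_ffsll()
 * returns 4 (the one-based index of the lowest set bit), so this helper
 * yields 3, the zero-based index.
 */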
#endif

#pragma mark -
#pragma mark Mach Misc.
#ifndef KERNEL

OS_ALWAYS_INLINE
static inline mach_port_t
firehose_mach_port_allocate(uint32_t flags, void *ctx)
{
	mach_port_t port = MACH_PORT_NULL;
	mach_port_options_t opts = {
		.flags = flags,
	};
	kern_return_t kr = mach_port_construct(mach_task_self(), &opts,
			(mach_port_context_t)ctx, &port);
	if (unlikely(kr)) {
		DISPATCH_VERIFY_MIG(kr);
		DISPATCH_CLIENT_CRASH(kr, "Unable to allocate mach port");
	}
	return port;
}
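
/*
 * Usage sketch (illustrative; the flag choice is an assumption, not taken
 * from this file): callers typically construct a receive right whose guard
 * doubles as a context pointer, and later tear it down with the matching
 * helper, which passes the same context back to mach_port_destruct():
 *
 *	mach_port_t p = firehose_mach_port_allocate(MPO_CONTEXT_AS_GUARD, fb);
 *	// ... use the port ...
 *	(void)firehose_mach_port_recv_dispose(p, fb);
 */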

OS_ALWAYS_INLINE
static inline kern_return_t
firehose_mach_port_recv_dispose(mach_port_t port, void *ctx)
{
	kern_return_t kr;
	kr = mach_port_destruct(mach_task_self(), port, 0,
			(mach_port_context_t)ctx);
	DISPATCH_VERIFY_MIG(kr);
	return kr;
}

OS_ALWAYS_INLINE
static inline void
firehose_mach_port_send_release(mach_port_t port)
{
	kern_return_t kr = mach_port_deallocate(mach_task_self(), port);
	DISPATCH_VERIFY_MIG(kr);
	dispatch_assume_zero(kr);
}

OS_ALWAYS_INLINE
static inline void
firehose_mach_port_guard(mach_port_t port, bool strict, void *ctx)
{
	kern_return_t kr = mach_port_guard(mach_task_self(), port,
			(mach_port_context_t)ctx, strict);
	DISPATCH_VERIFY_MIG(kr);
	dispatch_assume_zero(kr);
}

OS_ALWAYS_INLINE
static inline void
firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz,
		mach_msg_header_t *hdr)
{
	mig_reply_error_t *msg_reply = (mig_reply_error_t *)alloca(maxmsgsz);
	kern_return_t rc = KERN_SUCCESS;
	bool expects_reply = false;

	if (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
		expects_reply = true;
	}

	if (!fastpath(demux(hdr, &msg_reply->Head))) {
		rc = MIG_BAD_ID;
	} else if (msg_reply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		rc = KERN_SUCCESS;
	} else {
		// if MACH_MSGH_BITS_COMPLEX is _not_ set, then msg_reply->RetCode
		// is present
		rc = msg_reply->RetCode;
	}

	if (slowpath(rc == KERN_SUCCESS && expects_reply)) {
		// if crashing here, some handler returned KERN_SUCCESS
		// hoping for firehose_mig_server to perform the mach_msg()
		// call to reply, and it doesn't know how to do that
		DISPATCH_INTERNAL_CRASH(msg_reply->Head.msgh_id,
				"firehose_mig_server doesn't handle replies");
	}
	if (slowpath(rc != KERN_SUCCESS && rc != MIG_NO_REPLY)) {
		// destroy the request - but not the reply port
		hdr->msgh_remote_port = 0;
		mach_msg_destroy(hdr);
	}
}
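
/*
 * Illustrative sketch (the demux and subsystem names are hypothetical):
 * `demux` has the shape of a MIG server routine, filling in the reply header
 * and returning false for an unrecognized message ID:
 *
 *	static boolean_t
 *	my_demux(mach_msg_header_t *request, mach_msg_header_t *reply)
 *	{
 *		return my_subsystem_server(request, reply); // MIG-generated
 *	}
 *
 *	firehose_mig_server(my_demux, sizeof(union __ReplyUnion__my_subsystem),
 *			received_hdr);
 *
 * Handlers must reply through MIG's return-code path; as the crash above
 * states, firehose_mig_server never performs the reply mach_msg() itself.
 */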

#endif // !KERNEL
#pragma mark -
#pragma mark firehose buffer

OS_ALWAYS_INLINE
static inline firehose_chunk_t
firehose_buffer_chunk_for_address(void *addr)
{
	uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_CHUNK_SIZE - 1);
	return (firehose_chunk_t)chunk_addr;
}

OS_ALWAYS_INLINE
static inline uint16_t
firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_chunk_t fbc)
{
	return (uint16_t)(fbc - fb->fb_chunks);
}

OS_ALWAYS_INLINE
static inline firehose_chunk_t
firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref)
{
	return fb->fb_chunks + ref;
}
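
/*
 * Example (illustrative; assumes FIREHOSE_CHUNK_SIZE is a power of two such
 * as 0x1000, which the mask in _chunk_for_address already requires): a
 * tracepoint at address 0x7f0012f0 lives in the chunk based at 0x7f001000,
 * since the low bits are masked off. A ref is simply a chunk index: ref N
 * denotes &fb->fb_chunks[N], so _chunk_to_ref and _ref_to_chunk are inverses.
 */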

#ifndef FIREHOSE_SERVER
#if DISPATCH_PURE_C

OS_ALWAYS_INLINE
static inline uint8_t
firehose_buffer_qos_bits_propagate(void)
{
#ifndef KERNEL
	pthread_priority_t pp = _dispatch_priority_propagate();

	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	return (uint8_t)(pp >> _PTHREAD_PRIORITY_QOS_CLASS_SHIFT);
#else
	return 0;
#endif
}

OS_ALWAYS_INLINE
static inline void
firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream)
{
	firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream];
	firehose_stream_state_u old_state, new_state;
	firehose_chunk_t fc;
	uint64_t stamp = UINT64_MAX; // will cause the reservation to fail
	uint16_t ref;
	long result;

	old_state.fss_atomic_state =
			os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed);
	ref = old_state.fss_current;
	if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) {
		// there is no installed page, nothing to flush, go away
#ifndef KERNEL
		firehose_buffer_force_connect(fb);
#endif
		return;
	}

	fc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current);
	result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream,
			firehose_buffer_qos_bits_propagate(), 1, 0, NULL);
	if (likely(result < 0)) {
		firehose_buffer_ring_enqueue(fb, old_state.fss_current);
	}
	if (unlikely(result > 0)) {
		// because we pass a silly stamp that requires a flush
		DISPATCH_INTERNAL_CRASH(result, "Allocation should always fail");
	}

	// As a best effort, try to uninstall the page we just flushed;
	// failing is okay, let's not contend stupidly for something
	// allocators know how to handle in the first place.
	new_state = old_state;
	new_state.fss_current = 0;
	(void)os_atomic_cmpxchg2o(fbs, fbs_state.fss_atomic_state,
			old_state.fss_atomic_state, new_state.fss_atomic_state, relaxed);
}
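
/*
 * Note (inferred from the call sites in this file, not stated by the
 * original): firehose_chunk_tracepoint_try_reserve() appears to return a
 * positive offset on a successful reservation, a negative value when the
 * chunk is exhausted and the caller is the last writer responsible for
 * enqueuing it, and 0 when the reservation failed but another writer owns
 * the flush. Passing stamp = UINT64_MAX above forces the "exhausted" path,
 * which is how _stream_flush pushes out the current chunk without actually
 * writing a tracepoint.
 */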

/**
 * @function firehose_buffer_tracepoint_reserve
 *
 * @abstract
 * Reserves space in the firehose buffer for a tracepoint with the specified
 * characteristics.
 *
 * @discussion
 * This returns a slot with the length of the tracepoint already set, so
 * that in case of a crash we maximize our chance of being able to skip a
 * partially written tracepoint.
 *
 * Once the tracepoint has been written, firehose_buffer_tracepoint_flush()
 * must be called.
 *
 * @param fb
 * The buffer to allocate from.
 *
 * @param stamp
 * The timestamp for the tracepoint.
 *
 * @param stream
 * The buffer stream to use.
 *
 * @param pubsize
 * The size of the public data for this tracepoint; must not be 0 and does
 * not include the size of the tracepoint header.
 *
 * @param privsize
 * The size of the private data for this tracepoint; may be 0.
 *
 * @param privptr
 * The pointer to the private buffer; may be NULL.
 *
 * @result
 * The pointer to the tracepoint.
 */
OS_ALWAYS_INLINE
static inline firehose_tracepoint_t
firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp,
		firehose_stream_t stream, uint16_t pubsize,
		uint16_t privsize, uint8_t **privptr)
{
	firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream];
	firehose_stream_state_u old_state, new_state;
	firehose_chunk_t fc;
#if KERNEL
	bool failable = false;
#endif
	bool success;
	long result;
	uint16_t ref;

	// cannot use os_atomic_rmw_loop2o, _chunk_tracepoint_try_reserve
	// does a store
	old_state.fss_atomic_state =
			os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed);
	for (;;) {
		new_state = old_state;

		ref = old_state.fss_current;
		if (likely(ref && ref != FIREHOSE_STREAM_STATE_PRISTINE)) {
			fc = firehose_buffer_ref_to_chunk(fb, ref);
			result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream,
					firehose_buffer_qos_bits_propagate(),
					pubsize, privsize, privptr);
			if (likely(result > 0)) {
				uint64_t thread;
#ifdef KERNEL
				thread = thread_tid(current_thread());
#else
				thread = _pthread_threadid_self_np_direct();
#endif
				return firehose_chunk_tracepoint_begin(fc,
						stamp, pubsize, thread, result);
			}
			if (likely(result < 0)) {
				firehose_buffer_ring_enqueue(fb, old_state.fss_current);
			}
			new_state.fss_current = 0;
		}
#if KERNEL
		if (failable) {
			return NULL;
		}
#endif

		if (unlikely(old_state.fss_allocator)) {
			_dispatch_gate_wait(&fbs->fbs_state.fss_gate,
					DLOCK_LOCK_DATA_CONTENTION);
			old_state.fss_atomic_state =
					os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed);
#if KERNEL
			failable = true;
#endif
			continue;
		}

		// If the thread doing the allocation is a low-priority one, we may
		// starve high-priority ones, so disable preemption before we become
		// the allocator. Preemption is re-enabled in
		// firehose_buffer_stream_chunk_install().
		__firehose_critical_region_enter();
#if KERNEL
		new_state.fss_allocator = (uint32_t)cpu_number();
#else
		new_state.fss_allocator = _dispatch_lock_value_for_self();
#endif
		success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state,
				old_state.fss_atomic_state, new_state.fss_atomic_state,
				&old_state.fss_atomic_state, relaxed);
		if (likely(success)) {
			break;
		}
		__firehose_critical_region_leave();
	}

	struct firehose_tracepoint_query_s ask = {
		.pubsize = pubsize,
		.privsize = privsize,
		.stream = stream,
		.for_io = (firehose_stream_uses_io_bank & (1UL << stream)) != 0,
#ifndef KERNEL
		.quarantined = fb->fb_header.fbh_quarantined,
#endif
		.stamp = stamp,
	};
	return firehose_buffer_tracepoint_reserve_slow(fb, &ask, privptr);
}
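
/*
 * Usage sketch (illustrative; `ft_data`, the payload variables, and the
 * `ftid` value are assumptions, not taken from this file): a writer reserves
 * a slot, copies its payload, then publishes the tracepoint ID:
 *
 *	uint8_t *priv = NULL;
 *	firehose_tracepoint_t ft = firehose_buffer_tracepoint_reserve(fb,
 *			stamp, stream, pubsize, privsize, &priv);
 *	if (ft) { // in the kernel, the reservation is allowed to fail
 *		memcpy(ft->ft_data, pubdata, pubsize);
 *		if (privsize) memcpy(priv, privdata, privsize);
 *		firehose_buffer_tracepoint_flush(fb, ft, ftid);
 *	}
 */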

/**
 * @function firehose_buffer_tracepoint_flush
 *
 * @abstract
 * Flushes a firehose tracepoint, and sends the chunk to the daemon when it
 * is full and this was the last tracepoint writer for this chunk.
 *
 * @param fb
 * The buffer the tracepoint belongs to.
 *
 * @param ft
 * The tracepoint to flush.
 *
 * @param ftid
 * The firehose tracepoint ID for that tracepoint.
 * It is written last, preventing compiler reordering, so that its absence
 * on crash recovery means the tracepoint is partial.
 */
OS_ALWAYS_INLINE
static inline void
firehose_buffer_tracepoint_flush(firehose_buffer_t fb,
		firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
{
	firehose_chunk_t fc = firehose_buffer_chunk_for_address(ft);

	// Needed for process death handling (tracepoint-flush):
	// we want to make sure the observers see memory effects in program
	// (asm) order:
	// 1. write all the data to the tracepoint
	// 2. write the tracepoint ID, so that seeing it means the tracepoint
	//    is valid
	if (firehose_chunk_tracepoint_end(fc, ft, ftid)) {
		firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fc));
	}
}

#ifndef KERNEL
OS_ALWAYS_INLINE
static inline void
firehose_buffer_clear_bank_flags(firehose_buffer_t fb, unsigned long bits)
{
	firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank;
	unsigned long orig_flags;

	orig_flags = os_atomic_and_orig2o(fbb, fbb_flags, ~bits, relaxed);
	if (orig_flags != (orig_flags & ~bits)) {
		firehose_buffer_update_limits(fb);
	}
}

OS_ALWAYS_INLINE
static inline void
firehose_buffer_set_bank_flags(firehose_buffer_t fb, unsigned long bits)
{
	firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank;
	unsigned long orig_flags;

	orig_flags = os_atomic_or_orig2o(fbb, fbb_flags, bits, relaxed);
	if (orig_flags != (orig_flags | bits)) {
		firehose_buffer_update_limits(fb);
	}
}
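
/*
 * Note (inferred from the code above): the *_orig atomic variants return the
 * flag word as it was before the operation, so the comparisons detect whether
 * any bit actually changed. firehose_buffer_update_limits() therefore runs
 * only on a real flag transition, not on redundant set/clear calls.
 */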
#endif // !KERNEL

#endif // DISPATCH_PURE_C

#endif // !defined(FIREHOSE_SERVER)

#endif // __FIREHOSE_INLINE_INTERNAL__