/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __FIREHOSE_INLINE_INTERNAL__
#define __FIREHOSE_INLINE_INTERNAL__

#define firehose_atomic_maxv2o(p, f, v, o, m) \
		os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \
			if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \
		})

#define firehose_atomic_max2o(p, f, v, m) ({ \
		typeof((p)->f) _old; \
		firehose_atomic_maxv2o(p, f, v, &_old, m); \
	})

#ifndef KERNEL
// caller must test for non-zero first
OS_ALWAYS_INLINE
static inline uint16_t
firehose_bitmap_first_set(uint64_t bitmap)
{
	dispatch_assert(bitmap != 0);
	// this builtin returns 0 if bitmap is 0, or (first bit set + 1)
	return (uint16_t)__builtin_ffsll((long long)bitmap) - 1;
}
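
// For illustration (not part of the original source): given the standard
// ffsll() semantics described above,
//
//	firehose_bitmap_first_set(0x01) == 0   // lowest bit is set
//	firehose_bitmap_first_set(0x18) == 3   // bit 3 is the first bit set
//	firehose_bitmap_first_set(1ULL << 63) == 63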
#endif

#pragma mark -
#pragma mark Mach Misc.
#ifndef KERNEL

OS_ALWAYS_INLINE
static inline mach_port_t
firehose_mach_port_allocate(uint32_t flags, void *ctx)
{
	mach_port_t port = MACH_PORT_NULL;
	mach_port_options_t opts = {
		.flags = flags,
	};
	kern_return_t kr;

	for (;;) {
		kr = mach_port_construct(mach_task_self(), &opts,
				(mach_port_context_t)ctx, &port);
		if (fastpath(kr == KERN_SUCCESS)) {
			break;
		}
		DISPATCH_VERIFY_MIG(kr);
		dispatch_assume_zero(kr);
		_dispatch_temporary_resource_shortage();
	}
	return port;
}

OS_ALWAYS_INLINE
static inline kern_return_t
firehose_mach_port_recv_dispose(mach_port_t port, void *ctx)
{
	kern_return_t kr;
	kr = mach_port_destruct(mach_task_self(), port, 0,
			(mach_port_context_t)ctx);
	DISPATCH_VERIFY_MIG(kr);
	return kr;
}

OS_ALWAYS_INLINE
static inline void
firehose_mach_port_send_release(mach_port_t port)
{
	kern_return_t kr = mach_port_deallocate(mach_task_self(), port);
	DISPATCH_VERIFY_MIG(kr);
	dispatch_assume_zero(kr);
}

OS_ALWAYS_INLINE
static inline void
firehose_mach_port_guard(mach_port_t port, bool strict, void *ctx)
{
	kern_return_t kr = mach_port_guard(mach_task_self(), port,
			(mach_port_context_t)ctx, strict);
	DISPATCH_VERIFY_MIG(kr);
	dispatch_assume_zero(kr);
}

OS_ALWAYS_INLINE
static inline void
firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz,
		mach_msg_header_t *hdr)
{
	mig_reply_error_t *msg_reply = (mig_reply_error_t *)alloca(maxmsgsz);
	kern_return_t rc = KERN_SUCCESS;
	bool expects_reply = false;

	if (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
		expects_reply = true;
	}

	if (!fastpath(demux(hdr, &msg_reply->Head))) {
		rc = MIG_BAD_ID;
	} else if (msg_reply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		rc = KERN_SUCCESS;
	} else {
		// if MACH_MSGH_BITS_COMPLEX is _not_ set, then msg_reply->RetCode
		// is present
		rc = msg_reply->RetCode;
	}

	if (slowpath(rc == KERN_SUCCESS && expects_reply)) {
		// if crashing here, some handler returned KERN_SUCCESS
		// hoping for firehose_mig_server to perform the mach_msg()
		// call to reply, and it doesn't know how to do that
		DISPATCH_INTERNAL_CRASH(msg_reply->Head.msgh_id,
				"firehose_mig_server doesn't handle replies");
	}
	if (slowpath(rc != KERN_SUCCESS && rc != MIG_NO_REPLY)) {
		// destroy the request - but not the reply port
		hdr->msgh_remote_port = 0;
		mach_msg_destroy(hdr);
	}
}
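
// Illustrative usage sketch (not from the original source): a caller that has
// just received a MIG request into `hdr` would typically hand it off as
//
//	firehose_mig_server(my_subsystem_demux, MY_MAX_REPLY_SIZE, hdr);
//
// where `my_subsystem_demux` is a MIG-generated demux routine and
// MY_MAX_REPLY_SIZE bounds the reply buffer allocated on the stack; both
// names are placeholders. Handlers dispatched through this function must
// either send their own reply or return MIG_NO_REPLY; returning KERN_SUCCESS
// while the client expects a reply crashes, as noted in the function body.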

#endif // !KERNEL
#pragma mark -
#pragma mark firehose buffer

OS_ALWAYS_INLINE
static inline firehose_buffer_chunk_t
firehose_buffer_chunk_for_address(void *addr)
{
	uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_BUFFER_CHUNK_SIZE - 1);
	return (firehose_buffer_chunk_t)chunk_addr;
}

OS_ALWAYS_INLINE
static inline uint16_t
firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_buffer_chunk_t fbc)
{
	return (uint16_t)(fbc - fb->fb_chunks);
}

OS_ALWAYS_INLINE
static inline firehose_buffer_chunk_t
firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref)
{
	return fb->fb_chunks + ref;
}

#ifndef FIREHOSE_SERVER

OS_ALWAYS_INLINE
static inline bool
firehose_buffer_pos_fits(firehose_buffer_pos_u pos, uint16_t size)
{
	return pos.fbc_next_entry_offs + size <= pos.fbc_private_offs;
}

#if DISPATCH_PURE_C

OS_ALWAYS_INLINE
static inline uint8_t
firehose_buffer_qos_bits_propagate(void)
{
#ifndef KERNEL
	pthread_priority_t pp = _dispatch_priority_propagate();

	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	return (uint8_t)(pp >> _PTHREAD_PRIORITY_QOS_CLASS_SHIFT);
#else
	return 0;
#endif
}

OS_ALWAYS_INLINE
static inline long
firehose_buffer_chunk_try_reserve(firehose_buffer_chunk_t fbc, uint64_t stamp,
		firehose_stream_t stream, uint16_t pubsize,
		uint16_t privsize, uint8_t **privptr)
{
	const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
	firehose_buffer_pos_u orig, pos;
	uint8_t qos_bits = firehose_buffer_qos_bits_propagate();
	bool reservation_failed, stamp_delta_fits;

	stamp_delta_fits = ((stamp - fbc->fbc_timestamp) >> 48) == 0;

	// no acquire barrier because the returned space is only written to
	os_atomic_rmw_loop2o(fbc, fbc_pos.fbc_atomic_pos,
			orig.fbc_atomic_pos, pos.fbc_atomic_pos, relaxed, {
		if (unlikely(orig.fbc_atomic_pos == 0)) {
			// we acquired a really really old reference, and we probably
			// just faulted in a new page
			// FIXME: if/when we hit this we should try to madvise it back FREE
			os_atomic_rmw_loop_give_up(return 0);
		}
		if (unlikely(!FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(orig, stream))) {
			// nothing to do if the chunk is full, or the stream doesn't match,
			// in which case the thread probably:
			// - loaded the chunk ref
			// - was suspended for a long while
			// - read the chunk to find a very old thing
			os_atomic_rmw_loop_give_up(return 0);
		}
		pos = orig;
		pos.fbc_qos_bits |= qos_bits;
		if (unlikely(!firehose_buffer_pos_fits(orig,
				ft_size + pubsize + privsize) || !stamp_delta_fits)) {
			pos.fbc_flag_full = true;
			reservation_failed = true;
		} else {
			// we use these *_INC macros so that the compiler generates better
			// assembly: using the individual struct fields would force it to
			// handle carry propagation, which we know cannot happen here
			pos.fbc_atomic_pos += roundup(ft_size + pubsize, 8) *
					FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC;
			pos.fbc_atomic_pos -= privsize *
					FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC;
			pos.fbc_atomic_pos += FIREHOSE_BUFFER_POS_REFCNT_INC;
			const uint16_t minimum_payload_size = 16;
			if (!firehose_buffer_pos_fits(pos,
					roundup(ft_size + minimum_payload_size, 8))) {
				// if we can't even have minimum_payload_size bytes of payload
				// for the next tracepoint, just flush right away
				pos.fbc_flag_full = true;
			}
			reservation_failed = false;
		}
	});

	if (reservation_failed) {
		if (pos.fbc_refcnt) {
			// nothing to do: a thread that is still writing will pick up
			// the "FULL" flag on flush and push the chunk as a consequence
			return 0;
		}
		// caller must enqueue chunk
		return -1;
	}
	if (privptr) {
		*privptr = fbc->fbc_start + pos.fbc_private_offs;
	}
	return orig.fbc_next_entry_offs;
}

OS_ALWAYS_INLINE
static inline void
firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream)
{
	firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream];
	firehose_stream_state_u old_state, new_state;
	firehose_buffer_chunk_t fbc;
	uint64_t stamp = UINT64_MAX; // will cause the reservation to fail
	uint16_t ref;
	long result;

	old_state.fss_atomic_state =
			os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed);
	ref = old_state.fss_current;
	if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) {
		// there is no installed page, nothing to flush, go away
		return;
	}

	fbc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current);
	result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, 1, 0, NULL);
	if (likely(result < 0)) {
		firehose_buffer_ring_enqueue(fb, old_state.fss_current);
	}
	if (unlikely(result > 0)) {
		// because we pass a silly stamp that requires a flush
		DISPATCH_INTERNAL_CRASH(result, "Allocation should always fail");
	}

	// as a best effort try to uninstall the page we just flushed
	// but failing is okay, let's not contend stupidly for something
	// allocators know how to handle in the first place
	new_state = old_state;
	new_state.fss_current = 0;
	(void)os_atomic_cmpxchg2o(fbs, fbs_state.fss_atomic_state,
			old_state.fss_atomic_state, new_state.fss_atomic_state, relaxed);
}

/**
 * @function firehose_buffer_tracepoint_reserve
 *
 * @abstract
 * Reserves space in the firehose buffer for a tracepoint with the specified
 * characteristics.
 *
 * @discussion
 * This returns a slot with the length of the tracepoint already set, so that
 * in case of a crash we maximize the chance of being able to skip a partially
 * written tracepoint.
 *
 * Once the tracepoint has been written, firehose_buffer_tracepoint_flush()
 * must be called.
 *
 * @param fb
 * The buffer to allocate from.
 *
 * @param stamp
 * The timestamp of this tracepoint.
 *
 * @param stream
 * The buffer stream to use.
 *
 * @param pubsize
 * The size of the public data for this tracepoint; cannot be 0 and does not
 * include the size of the tracepoint header.
 *
 * @param privsize
 * The size of the private data for this tracepoint; can be 0.
 *
 * @param privptr
 * The pointer to the private buffer; can be NULL.
 *
 * @result
 * The pointer to the tracepoint.
 */
OS_ALWAYS_INLINE
static inline firehose_tracepoint_t
firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp,
		firehose_stream_t stream, uint16_t pubsize,
		uint16_t privsize, uint8_t **privptr)
{
	firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream];
	firehose_stream_state_u old_state, new_state;
	firehose_tracepoint_t ft;
	firehose_buffer_chunk_t fbc;
#if KERNEL
	bool failable = false;
#endif
	bool success;
	long result;
	uint16_t ref;

	// cannot use os_atomic_rmw_loop2o here:
	// firehose_buffer_chunk_try_reserve does a store
	old_state.fss_atomic_state =
			os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed);
	for (;;) {
		new_state = old_state;

		ref = old_state.fss_current;
		if (likely(ref && ref != FIREHOSE_STREAM_STATE_PRISTINE)) {
			fbc = firehose_buffer_ref_to_chunk(fb, ref);
			result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream,
					pubsize, privsize, privptr);
			if (likely(result > 0)) {
				ft = (firehose_tracepoint_t)(fbc->fbc_start + result);
				stamp -= fbc->fbc_timestamp;
				stamp |= (uint64_t)pubsize << 48;
				// Needed for process death handling (tracepoint-begin)
				// see firehose_buffer_stream_chunk_install
				os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed);
				dispatch_compiler_barrier();
				return ft;
			}
			if (likely(result < 0)) {
				firehose_buffer_ring_enqueue(fb, old_state.fss_current);
			}
			new_state.fss_current = 0;
		}
#if KERNEL
		if (failable) {
			return NULL;
		}
#endif

		if (unlikely(old_state.fss_allocator)) {
			_dispatch_gate_wait(&fbs->fbs_state.fss_gate,
					DLOCK_LOCK_DATA_CONTENTION);
			old_state.fss_atomic_state =
					os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed);
#if KERNEL
			failable = true;
#endif
			continue;
		}

		// if the thread doing the allocation is a low-priority one, we may
		// starve high-priority ones, so disable preemption before we become
		// the allocator; preemption is re-enabled in
		// firehose_buffer_stream_chunk_install
		__firehose_critical_region_enter();
#if KERNEL
		new_state.fss_allocator = (uint32_t)cpu_number();
#else
		new_state.fss_allocator = _dispatch_tid_self();
#endif
		success = os_atomic_cmpxchgvw2o(fbs, fbs_state.fss_atomic_state,
				old_state.fss_atomic_state, new_state.fss_atomic_state,
				&old_state.fss_atomic_state, relaxed);
		if (likely(success)) {
			break;
		}
		__firehose_critical_region_leave();
	}

	struct firehose_tracepoint_query_s ask = {
		.pubsize = pubsize,
		.privsize = privsize,
		.stream = stream,
		.for_io = (firehose_stream_uses_io_bank & (1UL << stream)) != 0,
		.stamp = stamp,
	};
	return firehose_buffer_tracepoint_reserve_slow(fb, &ask, privptr);
}

/**
 * @function firehose_buffer_tracepoint_flush
 *
 * @abstract
 * Flushes a firehose tracepoint, and sends the chunk to the daemon when it is
 * full and this was the last tracepoint writer for the chunk.
 *
 * @param fb
 * The buffer the tracepoint belongs to.
 *
 * @param ft
 * The tracepoint to flush.
 *
 * @param ftid
 * The firehose tracepoint ID for that tracepoint.
 * It is written last, preventing compiler reordering, so that its absence
 * on crash recovery means the tracepoint is partial.
 */
OS_ALWAYS_INLINE
static inline void
firehose_buffer_tracepoint_flush(firehose_buffer_t fb,
		firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
{
	firehose_buffer_chunk_t fbc = firehose_buffer_chunk_for_address(ft);
	firehose_buffer_pos_u pos;

	// Needed for process death handling (tracepoint-flush):
	// We want to make sure the observers
	// will see memory effects in program (asm) order.
	// 1. write all the data to the tracepoint
	// 2. write the tracepoint ID, so that seeing it means the tracepoint
	//    is valid
#ifdef KERNEL
	ft->ft_thread = thread_tid(current_thread());
#else
	ft->ft_thread = _pthread_threadid_self_np_direct();
#endif
	// release barrier makes the log writes visible
	os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release);
	pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos,
			FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed);
	if (pos.fbc_refcnt == 0 && pos.fbc_flag_full) {
		firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fbc));
	}
}
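
// Illustrative usage sketch (not part of the original header), showing the
// reserve/flush pairing described in the documentation above; `pub_bytes`,
// `priv_bytes` and `ftid` are placeholders the caller would have computed:
//
//	uint8_t *privptr = NULL;
//	firehose_tracepoint_t ft = firehose_buffer_tracepoint_reserve(fb, stamp,
//			stream, pubsize, privsize, &privptr);
//	if (ft) { // NULL is only possible for the kernel variant
//		memcpy(ft->ft_data, pub_bytes, pubsize);
//		if (privsize) memcpy(privptr, priv_bytes, privsize);
//		// writing ftid last is what publishes the tracepoint (see above)
//		firehose_buffer_tracepoint_flush(fb, ft, ftid);
//	}
//
// Every successful reserve must be followed by exactly one flush once the
// payload has been written.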

#ifndef KERNEL
OS_ALWAYS_INLINE
static inline void
firehose_buffer_clear_bank_flags(firehose_buffer_t fb, unsigned long bits)
{
	firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank;
	unsigned long orig_flags;

	orig_flags = os_atomic_and_orig2o(fbb, fbb_flags, ~bits, relaxed);
	if (orig_flags != (orig_flags & ~bits)) {
		firehose_buffer_update_limits(fb);
	}
}

OS_ALWAYS_INLINE
static inline void
firehose_buffer_set_bank_flags(firehose_buffer_t fb, unsigned long bits)
{
	firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank;
	unsigned long orig_flags;

	orig_flags = os_atomic_or_orig2o(fbb, fbb_flags, bits, relaxed);
	if (orig_flags != (orig_flags | bits)) {
		firehose_buffer_update_limits(fb);
	}
}
#endif // !KERNEL

#endif // DISPATCH_PURE_C

#endif // !defined(FIREHOSE_SERVER)

#endif // __FIREHOSE_INLINE_INTERNAL__