/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
21 #ifndef __FIREHOSE_INLINE_INTERNAL__
22 #define __FIREHOSE_INLINE_INTERNAL__
24 #ifndef _os_atomic_basetypeof
25 #define _os_atomic_basetypeof(p) \
26 typeof(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed))
29 #define firehose_atomic_maxv2o(p, f, v, o, m) \
30 os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \
31 if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \
34 #define firehose_atomic_max2o(p, f, v, m) ({ \
35 _os_atomic_basetypeof(&(p)->f) _old; \
36 firehose_atomic_maxv2o(p, f, v, &_old, m); \
// Returns the zero-based index of the least-significant set bit in bitmap.
// caller must test for non zero first (asserted below; the builtin's
// 0-means-no-bit convention would otherwise yield (uint16_t)-1).
static inline uint16_t
firehose_bitmap_first_set(uint64_t bitmap)
{
	dispatch_assert(bitmap != 0);
	// this builtin returns 0 if bitmap is 0, or (first bit set + 1)
	return (uint16_t)__builtin_ffsll((long long)bitmap) - 1;
}
52 #pragma mark Mach Misc.
56 static inline mach_port_t
57 firehose_mach_port_allocate(uint32_t flags
, void *ctx
)
59 mach_port_t port
= MACH_PORT_NULL
;
60 mach_port_options_t opts
= {
63 kern_return_t kr
= mach_port_construct(mach_task_self(), &opts
,
64 (mach_port_context_t
)ctx
, &port
);
66 DISPATCH_VERIFY_MIG(kr
);
67 DISPATCH_CLIENT_CRASH(kr
, "Unable to allocate mach port");
73 static inline kern_return_t
74 firehose_mach_port_recv_dispose(mach_port_t port
, void *ctx
)
77 kr
= mach_port_destruct(mach_task_self(), port
, 0,
78 (mach_port_context_t
)ctx
);
79 DISPATCH_VERIFY_MIG(kr
);
85 firehose_mach_port_send_release(mach_port_t port
)
87 kern_return_t kr
= mach_port_deallocate(mach_task_self(), port
);
88 DISPATCH_VERIFY_MIG(kr
);
89 dispatch_assume_zero(kr
);
94 firehose_mach_port_guard(mach_port_t port
, bool strict
, void *ctx
)
96 kern_return_t kr
= mach_port_guard(mach_task_self(), port
,
97 (mach_port_context_t
)ctx
, strict
);
98 DISPATCH_VERIFY_MIG(kr
);
99 dispatch_assume_zero(kr
);
104 firehose_mig_server(dispatch_mig_callback_t demux
, size_t maxmsgsz
,
105 mach_msg_header_t
*hdr
)
107 mig_reply_error_t
*msg_reply
= (mig_reply_error_t
*)alloca(maxmsgsz
);
108 kern_return_t rc
= KERN_SUCCESS
;
109 bool expects_reply
= false;
111 if (MACH_MSGH_BITS_REMOTE(hdr
->msgh_bits
) == MACH_MSG_TYPE_MOVE_SEND_ONCE
) {
112 expects_reply
= true;
115 if (!fastpath(demux(hdr
, &msg_reply
->Head
))) {
117 } else if (msg_reply
->Head
.msgh_bits
& MACH_MSGH_BITS_COMPLEX
) {
120 // if MACH_MSGH_BITS_COMPLEX is _not_ set, then msg_reply->RetCode
122 rc
= msg_reply
->RetCode
;
125 if (slowpath(rc
== KERN_SUCCESS
&& expects_reply
)) {
126 // if crashing here, some handler returned KERN_SUCCESS
127 // hoping for firehose_mig_server to perform the mach_msg()
128 // call to reply, and it doesn't know how to do that
129 DISPATCH_INTERNAL_CRASH(msg_reply
->Head
.msgh_id
,
130 "firehose_mig_server doesn't handle replies");
132 if (slowpath(rc
!= KERN_SUCCESS
&& rc
!= MIG_NO_REPLY
)) {
133 // destroy the request - but not the reply port
134 hdr
->msgh_remote_port
= 0;
135 mach_msg_destroy(hdr
);
141 #pragma mark firehose buffer
144 static inline firehose_chunk_t
145 firehose_buffer_chunk_for_address(void *addr
)
147 uintptr_t chunk_addr
= (uintptr_t)addr
& ~(FIREHOSE_CHUNK_SIZE
- 1);
148 return (firehose_chunk_t
)chunk_addr
;
152 static inline uint16_t
153 firehose_buffer_chunk_to_ref(firehose_buffer_t fb
, firehose_chunk_t fbc
)
155 return (uint16_t)(fbc
- fb
->fb_chunks
);
159 static inline firehose_chunk_t
160 firehose_buffer_ref_to_chunk(firehose_buffer_t fb
, uint16_t ref
)
162 return fb
->fb_chunks
+ ref
;
165 #ifndef FIREHOSE_SERVER
169 static inline uint8_t
170 firehose_buffer_qos_bits_propagate(void)
173 pthread_priority_t pp
= _dispatch_priority_propagate();
175 pp
&= _PTHREAD_PRIORITY_QOS_CLASS_MASK
;
176 return (uint8_t)(pp
>> _PTHREAD_PRIORITY_QOS_CLASS_SHIFT
);
184 firehose_buffer_stream_flush(firehose_buffer_t fb
, firehose_stream_t stream
)
186 firehose_buffer_stream_t fbs
= &fb
->fb_header
.fbh_stream
[stream
];
187 firehose_stream_state_u old_state
, new_state
;
189 uint64_t stamp
= UINT64_MAX
; // will cause the reservation to fail
193 old_state
.fss_atomic_state
=
194 os_atomic_load2o(fbs
, fbs_state
.fss_atomic_state
, relaxed
);
195 ref
= old_state
.fss_current
;
196 if (!ref
|| ref
== FIREHOSE_STREAM_STATE_PRISTINE
) {
197 // there is no installed page, nothing to flush, go away
199 firehose_buffer_force_connect(fb
);
204 fc
= firehose_buffer_ref_to_chunk(fb
, old_state
.fss_current
);
205 result
= firehose_chunk_tracepoint_try_reserve(fc
, stamp
, stream
,
206 firehose_buffer_qos_bits_propagate(), 1, 0, NULL
);
207 if (likely(result
< 0)) {
208 firehose_buffer_ring_enqueue(fb
, old_state
.fss_current
);
210 if (unlikely(result
> 0)) {
211 // because we pass a silly stamp that requires a flush
212 DISPATCH_INTERNAL_CRASH(result
, "Allocation should always fail");
215 // as a best effort try to uninstall the page we just flushed
216 // but failing is okay, let's not contend stupidly for something
217 // allocators know how to handle in the first place
218 new_state
= old_state
;
219 new_state
.fss_current
= 0;
220 (void)os_atomic_cmpxchg2o(fbs
, fbs_state
.fss_atomic_state
,
221 old_state
.fss_atomic_state
, new_state
.fss_atomic_state
, relaxed
);
/*!
 * @function firehose_buffer_tracepoint_reserve
 *
 * @abstract
 * Reserves space in the firehose buffer for the tracepoint with specified
 * length.
 *
 * @discussion
 * This returns a slot, with the length of the tracepoint already set, so
 * that in case of a crash, we maximize our chance to be able to skip the
 * tracepoint in case of a partial write.
 *
 * Once the tracepoint has been written, firehose_buffer_tracepoint_flush()
 * must be called.
 *
 * @param fb
 * The buffer to allocate from.
 *
 * @param stream
 * The buffer stream to use.
 *
 * @param pubsize
 * The size of the public data for this tracepoint, cannot be 0, doesn't
 * take the size of the tracepoint header into account.
 *
 * @param privsize
 * The size of the private data for this tracepoint, can be 0.
 *
 * @param privptr
 * The pointer to the private buffer, can be NULL
 *
 * @result
 * The pointer to the tracepoint.
 */
259 static inline firehose_tracepoint_t
260 firehose_buffer_tracepoint_reserve(firehose_buffer_t fb
, uint64_t stamp
,
261 firehose_stream_t stream
, uint16_t pubsize
,
262 uint16_t privsize
, uint8_t **privptr
)
264 firehose_buffer_stream_t fbs
= &fb
->fb_header
.fbh_stream
[stream
];
265 firehose_stream_state_u old_state
, new_state
;
268 bool failable
= false;
274 // cannot use os_atomic_rmw_loop2o, _page_try_reserve does a store
275 old_state
.fss_atomic_state
=
276 os_atomic_load2o(fbs
, fbs_state
.fss_atomic_state
, relaxed
);
278 new_state
= old_state
;
280 ref
= old_state
.fss_current
;
281 if (likely(ref
&& ref
!= FIREHOSE_STREAM_STATE_PRISTINE
)) {
282 fc
= firehose_buffer_ref_to_chunk(fb
, ref
);
283 result
= firehose_chunk_tracepoint_try_reserve(fc
, stamp
, stream
,
284 firehose_buffer_qos_bits_propagate(),
285 pubsize
, privsize
, privptr
);
286 if (likely(result
> 0)) {
289 thread
= thread_tid(current_thread());
291 thread
= _pthread_threadid_self_np_direct();
293 return firehose_chunk_tracepoint_begin(fc
,
294 stamp
, pubsize
, thread
, result
);
296 if (likely(result
< 0)) {
297 firehose_buffer_ring_enqueue(fb
, old_state
.fss_current
);
299 new_state
.fss_current
= 0;
307 if (unlikely(old_state
.fss_allocator
)) {
308 _dispatch_gate_wait(&fbs
->fbs_state
.fss_gate
,
309 DLOCK_LOCK_DATA_CONTENTION
);
310 old_state
.fss_atomic_state
=
311 os_atomic_load2o(fbs
, fbs_state
.fss_atomic_state
, relaxed
);
318 // if the thread doing the allocation is a low priority one
319 // we may starve high priority ones.
320 // so disable preemption before we become an allocator
321 // the reenabling of the preemption is in
322 // firehose_buffer_stream_chunk_install
323 __firehose_critical_region_enter();
325 new_state
.fss_allocator
= (uint32_t)cpu_number();
327 new_state
.fss_allocator
= _dispatch_lock_value_for_self();
329 success
= os_atomic_cmpxchgv2o(fbs
, fbs_state
.fss_atomic_state
,
330 old_state
.fss_atomic_state
, new_state
.fss_atomic_state
,
331 &old_state
.fss_atomic_state
, relaxed
);
332 if (likely(success
)) {
335 __firehose_critical_region_leave();
338 struct firehose_tracepoint_query_s ask
= {
340 .privsize
= privsize
,
342 .for_io
= (firehose_stream_uses_io_bank
& (1UL << stream
)) != 0,
344 .quarantined
= fb
->fb_header
.fbh_quarantined
,
348 return firehose_buffer_tracepoint_reserve_slow(fb
, &ask
, privptr
);
/*!
 * @function firehose_buffer_tracepoint_flush
 *
 * @abstract
 * Flushes a firehose tracepoint, and sends the chunk to the daemon when full
 * and this was the last tracepoint writer for this chunk.
 *
 * @param fb
 * The buffer the tracepoint belongs to.
 *
 * @param ft
 * The tracepoint to flush.
 *
 * @param ftid
 * The firehose tracepoint ID for that tracepoint.
 * It is written last, preventing compiler reordering, so that its absence
 * on crash recovery means the tracepoint is partial.
 */
371 firehose_buffer_tracepoint_flush(firehose_buffer_t fb
,
372 firehose_tracepoint_t ft
, firehose_tracepoint_id_u ftid
)
374 firehose_chunk_t fc
= firehose_buffer_chunk_for_address(ft
);
376 // Needed for process death handling (tracepoint-flush):
377 // We want to make sure the observers
378 // will see memory effects in program (asm) order.
379 // 1. write all the data to the tracepoint
380 // 2. write the tracepoint ID, so that seeing it means the tracepoint
382 if (firehose_chunk_tracepoint_end(fc
, ft
, ftid
)) {
383 firehose_buffer_ring_enqueue(fb
, firehose_buffer_chunk_to_ref(fb
, fc
));
390 firehose_buffer_clear_bank_flags(firehose_buffer_t fb
, unsigned long bits
)
392 firehose_buffer_bank_t fbb
= &fb
->fb_header
.fbh_bank
;
393 unsigned long orig_flags
;
395 orig_flags
= os_atomic_and_orig2o(fbb
, fbb_flags
, ~bits
, relaxed
);
396 if (orig_flags
!= (orig_flags
& ~bits
)) {
397 firehose_buffer_update_limits(fb
);
403 firehose_buffer_set_bank_flags(firehose_buffer_t fb
, unsigned long bits
)
405 firehose_buffer_bank_t fbb
= &fb
->fb_header
.fbh_bank
;
406 unsigned long orig_flags
;
408 orig_flags
= os_atomic_or_orig2o(fbb
, fbb_flags
, bits
, relaxed
);
409 if (orig_flags
!= (orig_flags
| bits
)) {
410 firehose_buffer_update_limits(fb
);
415 #endif // !defined(FIREHOSE_SERVER)
417 #endif // DISPATCH_PURE_C
419 #endif // __FIREHOSE_INLINE_INTERNAL__