/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
21 #ifndef __FIREHOSE_INLINE_INTERNAL__
22 #define __FIREHOSE_INLINE_INTERNAL__
// Atomically raise (p)->f to at least (v); the value observed before the
// update is left in *(o).  If the current value is already >= (v), the
// RMW loop gives up without storing.
// NOTE(review): the closing "})" was dropped in this copy and has been
// restored; the loop body tokens are as found.
#define firehose_atomic_maxv2o(p, f, v, o, m) \
		os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \
			if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \
		})
// Statement-expression convenience wrapper around firehose_atomic_maxv2o:
// performs the atomic max and evaluates to the previous value of (p)->f.
// NOTE(review): the "_old;" result line and closing "})" were dropped in
// this copy and have been restored.
#define firehose_atomic_max2o(p, f, v, m) ({ \
		typeof((p)->f) _old; \
		firehose_atomic_maxv2o(p, f, v, &_old, m); \
		_old; \
	})
// Returns the 0-based index of the least-significant set bit of bitmap.
// caller must test for non zero first: passing 0 would make the
// subtraction below yield (uint16_t)-1.
static inline uint16_t
firehose_bitmap_first_set(uint64_t bitmap)
{
	dispatch_assert(bitmap != 0);
	// this builtin returns 0 if bitmap is 0, or (first bit set + 1)
	return (uint16_t)__builtin_ffsll((long long)bitmap) - 1;
}
47 #pragma mark Mach Misc.
51 static inline mach_port_t
52 firehose_mach_port_allocate(uint32_t flags
, void *ctx
)
54 mach_port_t port
= MACH_PORT_NULL
;
55 mach_port_options_t opts
= {
58 kern_return_t kr
= mach_port_construct(mach_task_self(), &opts
,
59 (mach_port_context_t
)ctx
, &port
);
61 DISPATCH_VERIFY_MIG(kr
);
62 DISPATCH_CLIENT_CRASH(kr
, "Unable to allocate mach port");
68 static inline kern_return_t
69 firehose_mach_port_recv_dispose(mach_port_t port
, void *ctx
)
72 kr
= mach_port_destruct(mach_task_self(), port
, 0,
73 (mach_port_context_t
)ctx
);
74 DISPATCH_VERIFY_MIG(kr
);
80 firehose_mach_port_send_release(mach_port_t port
)
82 kern_return_t kr
= mach_port_deallocate(mach_task_self(), port
);
83 DISPATCH_VERIFY_MIG(kr
);
84 dispatch_assume_zero(kr
);
89 firehose_mach_port_guard(mach_port_t port
, bool strict
, void *ctx
)
91 kern_return_t kr
= mach_port_guard(mach_task_self(), port
,
92 (mach_port_context_t
)ctx
, strict
);
93 DISPATCH_VERIFY_MIG(kr
);
94 dispatch_assume_zero(kr
);
99 firehose_mig_server(dispatch_mig_callback_t demux
, size_t maxmsgsz
,
100 mach_msg_header_t
*hdr
)
102 mig_reply_error_t
*msg_reply
= (mig_reply_error_t
*)alloca(maxmsgsz
);
103 kern_return_t rc
= KERN_SUCCESS
;
104 bool expects_reply
= false;
106 if (MACH_MSGH_BITS_REMOTE(hdr
->msgh_bits
) == MACH_MSG_TYPE_MOVE_SEND_ONCE
) {
107 expects_reply
= true;
110 if (!fastpath(demux(hdr
, &msg_reply
->Head
))) {
112 } else if (msg_reply
->Head
.msgh_bits
& MACH_MSGH_BITS_COMPLEX
) {
115 // if MACH_MSGH_BITS_COMPLEX is _not_ set, then msg_reply->RetCode
117 rc
= msg_reply
->RetCode
;
120 if (slowpath(rc
== KERN_SUCCESS
&& expects_reply
)) {
121 // if crashing here, some handler returned KERN_SUCCESS
122 // hoping for firehose_mig_server to perform the mach_msg()
123 // call to reply, and it doesn't know how to do that
124 DISPATCH_INTERNAL_CRASH(msg_reply
->Head
.msgh_id
,
125 "firehose_mig_server doesn't handle replies");
127 if (slowpath(rc
!= KERN_SUCCESS
&& rc
!= MIG_NO_REPLY
)) {
128 // destroy the request - but not the reply port
129 hdr
->msgh_remote_port
= 0;
130 mach_msg_destroy(hdr
);
136 #pragma mark firehose buffer
139 static inline firehose_chunk_t
140 firehose_buffer_chunk_for_address(void *addr
)
142 uintptr_t chunk_addr
= (uintptr_t)addr
& ~(FIREHOSE_CHUNK_SIZE
- 1);
143 return (firehose_chunk_t
)chunk_addr
;
147 static inline uint16_t
148 firehose_buffer_chunk_to_ref(firehose_buffer_t fb
, firehose_chunk_t fbc
)
150 return (uint16_t)(fbc
- fb
->fb_chunks
);
154 static inline firehose_chunk_t
155 firehose_buffer_ref_to_chunk(firehose_buffer_t fb
, uint16_t ref
)
157 return fb
->fb_chunks
+ ref
;
160 #ifndef FIREHOSE_SERVER
164 static inline uint8_t
165 firehose_buffer_qos_bits_propagate(void)
168 pthread_priority_t pp
= _dispatch_priority_propagate();
170 pp
&= _PTHREAD_PRIORITY_QOS_CLASS_MASK
;
171 return (uint8_t)(pp
>> _PTHREAD_PRIORITY_QOS_CLASS_SHIFT
);
179 firehose_buffer_stream_flush(firehose_buffer_t fb
, firehose_stream_t stream
)
181 firehose_buffer_stream_t fbs
= &fb
->fb_header
.fbh_stream
[stream
];
182 firehose_stream_state_u old_state
, new_state
;
184 uint64_t stamp
= UINT64_MAX
; // will cause the reservation to fail
188 old_state
.fss_atomic_state
=
189 os_atomic_load2o(fbs
, fbs_state
.fss_atomic_state
, relaxed
);
190 ref
= old_state
.fss_current
;
191 if (!ref
|| ref
== FIREHOSE_STREAM_STATE_PRISTINE
) {
192 // there is no installed page, nothing to flush, go away
194 firehose_buffer_force_connect(fb
);
199 fc
= firehose_buffer_ref_to_chunk(fb
, old_state
.fss_current
);
200 result
= firehose_chunk_tracepoint_try_reserve(fc
, stamp
, stream
,
201 firehose_buffer_qos_bits_propagate(), 1, 0, NULL
);
202 if (likely(result
< 0)) {
203 firehose_buffer_ring_enqueue(fb
, old_state
.fss_current
);
205 if (unlikely(result
> 0)) {
206 // because we pass a silly stamp that requires a flush
207 DISPATCH_INTERNAL_CRASH(result
, "Allocation should always fail");
210 // as a best effort try to uninstall the page we just flushed
211 // but failing is okay, let's not contend stupidly for something
212 // allocators know how to handle in the first place
213 new_state
= old_state
;
214 new_state
.fss_current
= 0;
215 (void)os_atomic_cmpxchg2o(fbs
, fbs_state
.fss_atomic_state
,
216 old_state
.fss_atomic_state
, new_state
.fss_atomic_state
, relaxed
);
/*!
 * @function firehose_buffer_tracepoint_reserve
 *
 * @abstract
 * Reserves space in the firehose buffer for the tracepoint with specified
 * public and private data sizes.
 *
 * @discussion
 * This returns a slot, with the length of the tracepoint already set, so
 * that in case of a crash, we maximize our chance to be able to skip the
 * tracepoint in case of a partial write.
 *
 * Once the tracepoint has been written, firehose_buffer_tracepoint_flush()
 * must be called.
 *
 * @param fb
 * The buffer to allocate from.
 *
 * @param stream
 * The buffer stream to use.
 *
 * @param pubsize
 * The size of the public data for this tracepoint, cannot be 0, doesn't
 * take the size of the tracepoint header into account.
 *
 * @param privsize
 * The size of the private data for this tracepoint, can be 0.
 *
 * @param privptr
 * The pointer to the private buffer, can be NULL
 *
 * @result
 * The pointer to the tracepoint.
 */
254 static inline firehose_tracepoint_t
255 firehose_buffer_tracepoint_reserve(firehose_buffer_t fb
, uint64_t stamp
,
256 firehose_stream_t stream
, uint16_t pubsize
,
257 uint16_t privsize
, uint8_t **privptr
)
259 firehose_buffer_stream_t fbs
= &fb
->fb_header
.fbh_stream
[stream
];
260 firehose_stream_state_u old_state
, new_state
;
263 bool failable
= false;
269 // cannot use os_atomic_rmw_loop2o, _page_try_reserve does a store
270 old_state
.fss_atomic_state
=
271 os_atomic_load2o(fbs
, fbs_state
.fss_atomic_state
, relaxed
);
273 new_state
= old_state
;
275 ref
= old_state
.fss_current
;
276 if (likely(ref
&& ref
!= FIREHOSE_STREAM_STATE_PRISTINE
)) {
277 fc
= firehose_buffer_ref_to_chunk(fb
, ref
);
278 result
= firehose_chunk_tracepoint_try_reserve(fc
, stamp
, stream
,
279 firehose_buffer_qos_bits_propagate(),
280 pubsize
, privsize
, privptr
);
281 if (likely(result
> 0)) {
284 thread
= thread_tid(current_thread());
286 thread
= _pthread_threadid_self_np_direct();
288 return firehose_chunk_tracepoint_begin(fc
,
289 stamp
, pubsize
, thread
, result
);
291 if (likely(result
< 0)) {
292 firehose_buffer_ring_enqueue(fb
, old_state
.fss_current
);
294 new_state
.fss_current
= 0;
302 if (unlikely(old_state
.fss_allocator
)) {
303 _dispatch_gate_wait(&fbs
->fbs_state
.fss_gate
,
304 DLOCK_LOCK_DATA_CONTENTION
);
305 old_state
.fss_atomic_state
=
306 os_atomic_load2o(fbs
, fbs_state
.fss_atomic_state
, relaxed
);
313 // if the thread doing the allocation is a low priority one
314 // we may starve high priority ones.
315 // so disable preemption before we become an allocator
316 // the reenabling of the preemption is in
317 // firehose_buffer_stream_chunk_install
318 __firehose_critical_region_enter();
320 new_state
.fss_allocator
= (uint32_t)cpu_number();
322 new_state
.fss_allocator
= _dispatch_lock_value_for_self();
324 success
= os_atomic_cmpxchgv2o(fbs
, fbs_state
.fss_atomic_state
,
325 old_state
.fss_atomic_state
, new_state
.fss_atomic_state
,
326 &old_state
.fss_atomic_state
, relaxed
);
327 if (likely(success
)) {
330 __firehose_critical_region_leave();
333 struct firehose_tracepoint_query_s ask
= {
335 .privsize
= privsize
,
337 .for_io
= (firehose_stream_uses_io_bank
& (1UL << stream
)) != 0,
339 .quarantined
= fb
->fb_header
.fbh_quarantined
,
343 return firehose_buffer_tracepoint_reserve_slow(fb
, &ask
, privptr
);
/*!
 * @function firehose_buffer_tracepoint_flush
 *
 * @abstract
 * Flushes a firehose tracepoint, and sends the chunk to the daemon when full
 * and this was the last tracepoint writer for this chunk.
 *
 * @param fb
 * The buffer the tracepoint belongs to.
 *
 * @param ft
 * The tracepoint to flush.
 *
 * @param ftid
 * The firehose tracepoint ID for that tracepoint.
 * It is written last, preventing compiler reordering, so that its absence
 * on crash recovery means the tracepoint is partial.
 */
366 firehose_buffer_tracepoint_flush(firehose_buffer_t fb
,
367 firehose_tracepoint_t ft
, firehose_tracepoint_id_u ftid
)
369 firehose_chunk_t fc
= firehose_buffer_chunk_for_address(ft
);
371 // Needed for process death handling (tracepoint-flush):
372 // We want to make sure the observers
373 // will see memory effects in program (asm) order.
374 // 1. write all the data to the tracepoint
375 // 2. write the tracepoint ID, so that seeing it means the tracepoint
377 if (firehose_chunk_tracepoint_end(fc
, ft
, ftid
)) {
378 firehose_buffer_ring_enqueue(fb
, firehose_buffer_chunk_to_ref(fb
, fc
));
385 firehose_buffer_clear_bank_flags(firehose_buffer_t fb
, unsigned long bits
)
387 firehose_buffer_bank_t fbb
= &fb
->fb_header
.fbh_bank
;
388 unsigned long orig_flags
;
390 orig_flags
= os_atomic_and_orig2o(fbb
, fbb_flags
, ~bits
, relaxed
);
391 if (orig_flags
!= (orig_flags
& ~bits
)) {
392 firehose_buffer_update_limits(fb
);
398 firehose_buffer_set_bank_flags(firehose_buffer_t fb
, unsigned long bits
)
400 firehose_buffer_bank_t fbb
= &fb
->fb_header
.fbh_bank
;
401 unsigned long orig_flags
;
403 orig_flags
= os_atomic_or_orig2o(fbb
, fbb_flags
, bits
, relaxed
);
404 if (orig_flags
!= (orig_flags
| bits
)) {
405 firehose_buffer_update_limits(fb
);
410 #endif // !defined(FIREHOSE_SERVER)
412 #endif // DISPATCH_PURE_C
414 #endif // __FIREHOSE_INLINE_INTERNAL__