/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __FIREHOSE_CHUNK_PRIVATE__
#define __FIREHOSE_CHUNK_PRIVATE__

#include <sys/param.h>
#include "firehose_types_private.h"
#include "tracepoint_private.h"

#define FIREHOSE_CHUNK_SIZE 4096ul

#define FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC   (1ULL <<  0)
#define FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC (1ULL << 16)
#define FIREHOSE_CHUNK_POS_REFCNT_INC       (1ULL << 32)
#define FIREHOSE_CHUNK_POS_FULL_BIT         (1ULL << 56)
#define FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(pos, stream) \
		((((pos).fcp_pos >> 48) & 0x1ff) == (uint16_t)stream)
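
// The *_INC constants are the strides of the fields packed into the 64-bit
// position word declared below, so a single 64-bit add or subtract can update
// several fields at once. FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM reads 9 bits
// starting at bit 48: the stream byte (bits 48-55) plus the "full" flag
// (bit 56). Because stream identifiers fit in the 8-bit stream field, the
// comparison only succeeds when the stream matches and the full bit is clear.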

typedef union {
	os_atomic(uint64_t) fcp_atomic_pos;
	uint64_t fcp_pos;
	struct {
		uint16_t fcp_next_entry_offs;
		uint16_t fcp_private_offs;
		uint8_t fcp_refcnt;
		uint8_t fcp_qos;
		uint8_t fcp_stream;
		uint8_t fcp_flag_full : 1;
		uint8_t fcp_flag_io : 1;
		uint8_t fcp_quarantined : 1;
		uint8_t _fcp_flag_unused : 5;
	};
} firehose_chunk_pos_u;
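
// On the little-endian targets this header is built for, the bitfield struct
// above overlays the 64-bit fcp_pos word as follows (consistent with the
// *_INC strides):
//   bits  0-15  fcp_next_entry_offs  offset of the next tracepoint slot
//   bits 16-31  fcp_private_offs     start of the private-data area
//   bits 32-39  fcp_refcnt           number of writers currently in the chunk
//   bits 40-47  fcp_qos              highest QoS requested so far
//   bits 48-55  fcp_stream           stream this chunk belongs to
//   bits 56-58  fcp_flag_full, fcp_flag_io, fcp_quarantined
//               (bit 56 matches FIREHOSE_CHUNK_POS_FULL_BIT)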

typedef struct firehose_chunk_s {
	union {
		uint8_t fc_start[0];
		firehose_chunk_pos_u fc_pos;
	};
	uint64_t fc_timestamp;
	uint8_t fc_data[FIREHOSE_CHUNK_SIZE - 8 - 8];
} *firehose_chunk_t;
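
// A chunk is exactly FIREHOSE_CHUNK_SIZE bytes: an 8-byte position word plus
// an 8-byte base timestamp, followed by fc_data (hence the "- 8 - 8").
// Tracepoint and private-data offsets handed out below are measured from
// fc_start, i.e. from the very beginning of the chunk, not from fc_data.
// An illustrative compile-time sanity check (not part of this header) would
// be:
//   _Static_assert(sizeof(struct firehose_chunk_s) == FIREHOSE_CHUNK_SIZE,
//           "a firehose chunk must be exactly one chunk-sized block");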

typedef struct firehose_chunk_range_s {
	uint16_t fcr_offset; // offset from the start of the chunk
	uint16_t fcr_length;
} *firehose_chunk_range_t;

#if __has_include(<os/atomic_private.h>)
#if defined(KERNEL) || defined(OS_FIREHOSE_SPI)

static inline bool
firehose_chunk_pos_fits(firehose_chunk_pos_u *pos, uint16_t size)
{
	return pos->fcp_next_entry_offs + size <= pos->fcp_private_offs;
}
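
// Entries are appended from the front of the chunk (fcp_next_entry_offs grows
// upward) while private data is carved out from the back (fcp_private_offs
// moves downward), so a reservation "fits" as long as the two offsets have not
// crossed. For example, with fcp_next_entry_offs == 64 and
// fcp_private_offs == 4000, a 96-byte reservation fits because 64 + 96 <= 4000.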

#define FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE (-1)
#define FIREHOSE_CHUNK_TRY_RESERVE_FAIL         ( 0)
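
// Return values of firehose_chunk_tracepoint_try_reserve() below:
//   FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE (-1): the reservation failed and
//       no other writer holds a reference, so the caller must enqueue the
//       chunk to be flushed;
//   FIREHOSE_CHUNK_TRY_RESERVE_FAIL (0): the reservation failed but an active
//       writer will notice the "full" flag on flush and push the chunk;
//   any positive value: the reservation succeeded and the value is the byte
//       offset of the new tracepoint from the start of the chunk.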

#if OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY

static inline long
firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp,
		firehose_stream_t stream, uint8_t qos, uint16_t pubsize,
		uint16_t privsize, uint8_t **privptr)
{
	const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
	firehose_chunk_pos_u orig, pos;
	bool reservation_failed, stamp_delta_fits;

	stamp_delta_fits = ((stamp - fc->fc_timestamp) >> 48) == 0;

	// no acquire barrier: the space returned is only ever written to
	os_atomic_rmw_loop(&fc->fc_pos.fcp_atomic_pos,
			orig.fcp_pos, pos.fcp_pos, relaxed, {
		if (orig.fcp_pos == 0) {
			// we acquired a really, really old reference, and we probably
			// just faulted in a new page
			os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL);
		}
		if (!FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(orig, stream)) {
			// nothing to do if the chunk is full or the stream doesn't match;
			// in that case the thread probably:
			// - loaded the chunk ref,
			// - was suspended for a long while,
			// - and read the chunk only to find a very old thing
			os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL);
		}
		pos.fcp_pos = orig.fcp_pos;
		if (!firehose_chunk_pos_fits(&orig,
				ft_size + pubsize + privsize) || !stamp_delta_fits) {
			pos.fcp_flag_full = true;
			reservation_failed = true;
		} else {
			if (qos > pos.fcp_qos) {
				pos.fcp_qos = qos;
			}
			// the *_INC macros are used so that the compiler generates better
			// assembly: updating the individual struct fields would force it
			// to handle carry propagation, which we know cannot happen
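			// Illustrative walk-through (example values, not from the source):
			// with ft_size + pubsize rounded up to 48 and privsize == 32, the
			// single 64-bit updates below advance the entry offset (bits 0-15)
			// by 48, pull the private offset (bits 16-31) down by 32, and bump
			// the writer refcount (bits 32-39) by one, without any carry
			// crossing a field boundary.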
			pos.fcp_pos += roundup(ft_size + pubsize, 8) *
					FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC;
			pos.fcp_pos -= privsize * FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC;
			pos.fcp_pos += FIREHOSE_CHUNK_POS_REFCNT_INC;
			const uint16_t minimum_payload_size = 16;
			if (!firehose_chunk_pos_fits(&pos,
					roundup(ft_size + minimum_payload_size, 8))) {
				// if there isn't even room for minimum_payload_size bytes of
				// payload for the next tracepoint, just flush right away
				pos.fcp_flag_full = true;
			}
			reservation_failed = false;
		}
	});

	if (reservation_failed) {
		if (pos.fcp_refcnt) {
			// nothing to do: a thread that is still writing will pick up
			// the "FULL" flag on flush and push the chunk as a consequence
			return FIREHOSE_CHUNK_TRY_RESERVE_FAIL;
		}
		// caller must enqueue chunk
		return FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE;
	}
	if (privptr) {
		*privptr = (uint8_t *)((uintptr_t)fc->fc_start + pos.fcp_private_offs);
	}
	return orig.fcp_next_entry_offs;
}

static inline firehose_tracepoint_t
firehose_chunk_tracepoint_begin(firehose_chunk_t fc, uint64_t stamp,
		uint16_t pubsize, uint64_t thread_id, long offset)
{
	firehose_tracepoint_t ft = (firehose_tracepoint_t)
			__builtin_assume_aligned((void *)((uintptr_t)fc->fc_start +
			(uintptr_t)offset), 8);
	stamp -= fc->fc_timestamp;
	stamp |= (uint64_t)pubsize << 48;
	// The compiler barrier is needed for userland process death handling, see
	// (tracepoint-begin) in libdispatch's firehose_buffer_stream_chunk_install.
	os_atomic_std(atomic_store_explicit)(&ft->ft_atomic_stamp_and_length, stamp,
			os_atomic_std(memory_order_relaxed));
	__asm__ __volatile__("" ::: "memory");
	ft->ft_thread = thread_id;
	return ft;
}
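
// Note that the stamp stored above is a delta: the chunk's base fc_timestamp
// is subtracted and the tracepoint's public length is packed into the top 16
// bits. This is why firehose_chunk_tracepoint_try_reserve() refuses a
// reservation (stamp_delta_fits) whenever the delta would not fit in 48 bits.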

static inline bool
firehose_chunk_tracepoint_end(firehose_chunk_t fc,
		firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
{
	firehose_chunk_pos_u pos;

	os_atomic_std(atomic_store_explicit)(&ft->ft_id.ftid_atomic_value,
			ftid.ftid_value, os_atomic_std(memory_order_release));
	pos.fcp_pos = os_atomic_std(atomic_fetch_sub_explicit)(
			&fc->fc_pos.fcp_atomic_pos,
			FIREHOSE_CHUNK_POS_REFCNT_INC, os_atomic_std(memory_order_relaxed));
	return pos.fcp_refcnt == 1 && pos.fcp_flag_full;
}
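
// A minimal sketch of how these helpers fit together (illustrative only; the
// real call sites live in the firehose buffer code, and chunk, stamp, stream,
// qos, pubsize, tid and ftid are assumed to be set up by the caller):
//
//	long offs = firehose_chunk_tracepoint_try_reserve(chunk, stamp,
//			stream, qos, pubsize, 0, NULL);
//	if (offs > 0) {
//		firehose_tracepoint_t ft = firehose_chunk_tracepoint_begin(chunk,
//				stamp, pubsize, tid, offs);
//		// ... copy pubsize bytes of payload into ft->ft_data ...
//		if (firehose_chunk_tracepoint_end(chunk, ft, ftid)) {
//			// last writer of a full chunk: the chunk should now be flushed
//		}
//	} else if (offs == FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE) {
//		// no active writer left: the caller must enqueue the chunk for flush
//	}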

#endif // OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY

#endif // defined(KERNEL) || defined(OS_FIREHOSE_SPI)
#endif // __has_include(<os/atomic_private.h>)

#endif // __FIREHOSE_CHUNK_PRIVATE__