/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
#ifndef __FIREHOSE_CHUNK_PRIVATE__
#define __FIREHOSE_CHUNK_PRIVATE__

#if KERNEL
#include <machine/atomic.h>
#endif
#include <stdatomic.h>
#include <sys/param.h>

#include "firehose_types_private.h"
#include "tracepoint_private.h"
// Size of one firehose chunk (one page of tracepoint buffer).
#define FIREHOSE_CHUNK_SIZE 4096ul

// Increments applied to the packed 64-bit position word (firehose_chunk_pos_u)
// so that the whole state can be updated with single-word atomics.
#define FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC (1ULL << 0)
#define FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC (1ULL << 16)
#define FIREHOSE_CHUNK_POS_REFCNT_INC (1ULL << 32)
#define FIREHOSE_CHUNK_POS_FULL_BIT (1ULL << 56)
// True when the chunk belongs to `stream` AND the full bit is clear:
// the 9-bit mask covers the 8-bit stream field plus the full flag at bit 56.
#define FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(pos, stream) \
		((((pos).fcp_pos >> 48) & 0x1ff) == (uint16_t)stream)
/*
 * Packed allocation state of a chunk, always read/written as one 64-bit word
 * so it can be updated with single-word atomics.
 *
 * Field layout (low to high bits; matches the *_INC shift macros above):
 *   [ 0,16) fcp_next_entry_offs  offset of the next tracepoint to reserve
 *   [16,32) fcp_private_offs     offset where the private data area begins
 *   [32,40) fcp_refcnt           number of writers currently in the chunk
 *   [40,48) fcp_qos              QOS tracked for the chunk (see try_reserve)
 *   [48,56) fcp_stream           stream this chunk was installed for
 *   bit 56  fcp_flag_full        chunk cannot take further tracepoints
 *   bit 57  fcp_flag_io          NOTE(review): presumably "destined for I/O"
 *                                buffers — confirm against the buffer code
 *   bit 58  fcp_quarantined      NOTE(review): quarantine flag — confirm
 */
typedef union {
	_Atomic(uint64_t) fcp_atomic_pos; // atomic view, for rmw loops
	uint64_t fcp_pos;                 // plain view, for local arithmetic
	struct {
		uint16_t fcp_next_entry_offs;
		uint16_t fcp_private_offs;
		uint8_t  fcp_refcnt;
		uint8_t  fcp_qos;
		uint8_t  fcp_stream;
		uint8_t  fcp_flag_full : 1;
		uint8_t  fcp_flag_io : 1;
		uint8_t  fcp_quarantined : 1;
		uint8_t  _fcp_flag_unused : 5;
	};
} firehose_chunk_pos_u;
60 typedef struct firehose_chunk_s
{
62 firehose_chunk_pos_u fc_pos
;
63 uint64_t fc_timestamp
;
64 uint8_t fc_data
[FIREHOSE_CHUNK_SIZE
- 8 - 8];
// A byte range within a chunk, as an (offset, length) pair.
typedef struct firehose_chunk_range_s {
	uint16_t fcr_offset; // offset from the start of the chunk
	uint16_t fcr_length; // length of the range in bytes
} *firehose_chunk_range_t;
#if defined(KERNEL) || defined(OS_FIREHOSE_SPI)
76 firehose_chunk_pos_fits(firehose_chunk_pos_u pos
, uint16_t size
)
78 return pos
.fcp_next_entry_offs
+ size
<= pos
.fcp_private_offs
;
// Failure results of firehose_chunk_tracepoint_try_reserve (success is the
// positive chunk-relative offset of the reservation).
#define FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE (-1) // caller must enqueue chunk
#define FIREHOSE_CHUNK_TRY_RESERVE_FAIL ( 0)         // failed, nothing to do
86 firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc
, uint64_t stamp
,
87 firehose_stream_t stream
, uint8_t qos
, uint16_t pubsize
,
88 uint16_t privsize
, uint8_t **privptr
)
90 const uint16_t ft_size
= offsetof(struct firehose_tracepoint_s
, ft_data
);
91 firehose_chunk_pos_u orig
, pos
;
92 bool reservation_failed
, stamp_delta_fits
;
94 stamp_delta_fits
= ((stamp
- fc
->fc_timestamp
) >> 48) == 0;
96 // no acquire barrier because the returned space is written to only
97 os_atomic_rmw_loop(&fc
->fc_pos
.fcp_atomic_pos
,
98 orig
.fcp_pos
, pos
.fcp_pos
, relaxed
, {
99 if (orig
.fcp_pos
== 0) {
100 // we acquired a really really old reference, and we probably
101 // just faulted in a new page
102 os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL
);
104 if (!FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(orig
, stream
)) {
105 // nothing to do if the chunk is full, or the stream doesn't match,
106 // in which case the thread probably:
107 // - loaded the chunk ref
108 // - been suspended a long while
109 // - read the chunk to find a very old thing
110 os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL
);
113 if (!firehose_chunk_pos_fits(orig
,
114 ft_size
+ pubsize
+ privsize
) || !stamp_delta_fits
) {
115 pos
.fcp_flag_full
= true;
116 reservation_failed
= true;
118 if (qos
> pos
.fcp_qos
) pos
.fcp_qos
= qos
;
119 // using these *_INC macros is so that the compiler generates better
120 // assembly: using the struct individual fields forces the compiler
121 // to handle carry propagations, and we know it won't happen
122 pos
.fcp_pos
+= roundup(ft_size
+ pubsize
, 8) *
123 FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC
;
124 pos
.fcp_pos
-= privsize
* FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC
;
125 pos
.fcp_pos
+= FIREHOSE_CHUNK_POS_REFCNT_INC
;
126 const uint16_t minimum_payload_size
= 16;
127 if (!firehose_chunk_pos_fits(pos
,
128 roundup(ft_size
+ minimum_payload_size
, 8))) {
129 // if we can't even have minimum_payload_size bytes of payload
130 // for the next tracepoint, just flush right away
131 pos
.fcp_flag_full
= true;
133 reservation_failed
= false;
137 if (reservation_failed
) {
138 if (pos
.fcp_refcnt
) {
139 // nothing to do, there is a thread writing that will pick up
140 // the "FULL" flag on flush and push as a consequence
141 return FIREHOSE_CHUNK_TRY_RESERVE_FAIL
;
143 // caller must enqueue chunk
144 return FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE
;
147 *privptr
= fc
->fc_start
+ pos
.fcp_private_offs
;
149 return orig
.fcp_next_entry_offs
;
153 static inline firehose_tracepoint_t
154 firehose_chunk_tracepoint_begin(firehose_chunk_t fc
, uint64_t stamp
,
155 uint16_t pubsize
, uint64_t thread_id
, long offset
)
157 firehose_tracepoint_t ft
= (firehose_tracepoint_t
)
158 __builtin_assume_aligned(fc
->fc_start
+ offset
, 8);
159 stamp
-= fc
->fc_timestamp
;
160 stamp
|= (uint64_t)pubsize
<< 48;
161 // The compiler barrier is needed for userland process death handling, see
162 // (tracepoint-begin) in libdispatch's firehose_buffer_stream_chunk_install.
163 atomic_store_explicit(&ft
->ft_atomic_stamp_and_length
, stamp
,
164 memory_order_relaxed
);
165 __asm__
__volatile__("" ::: "memory");
166 ft
->ft_thread
= thread_id
;
172 firehose_chunk_tracepoint_end(firehose_chunk_t fc
,
173 firehose_tracepoint_t ft
, firehose_tracepoint_id_u ftid
)
175 firehose_chunk_pos_u pos
;
177 atomic_store_explicit(&ft
->ft_id
.ftid_atomic_value
,
178 ftid
.ftid_value
, memory_order_release
);
179 pos
.fcp_pos
= atomic_fetch_sub_explicit(&fc
->fc_pos
.fcp_atomic_pos
,
180 FIREHOSE_CHUNK_POS_REFCNT_INC
, memory_order_relaxed
);
181 return pos
.fcp_refcnt
== 1 && pos
.fcp_flag_full
;
#endif // defined(KERNEL) || defined(OS_FIREHOSE_SPI)

#endif // __FIREHOSE_CHUNK_PRIVATE__