/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __FIREHOSE_CHUNK_PRIVATE__
#define __FIREHOSE_CHUNK_PRIVATE__

#include <sys/param.h>
#include "firehose_types_private.h"
#include "tracepoint_private.h"

__BEGIN_DECLS

#define FIREHOSE_CHUNK_SIZE                 4096ul

#define FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC   (1ULL << 0)
#define FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC (1ULL << 16)
#define FIREHOSE_CHUNK_POS_REFCNT_INC       (1ULL << 32)
#define FIREHOSE_CHUNK_POS_FULL_BIT         (1ULL << 56)
#define FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(pos, stream) \
	((((pos).fcp_pos >> 48) & 0x1ff) == (uint16_t)stream)

typedef union {
	os_atomic(uint64_t) fcp_atomic_pos;
	uint64_t fcp_pos;
	struct {
		uint16_t fcp_next_entry_offs;
		uint16_t fcp_private_offs;
		uint8_t  fcp_refcnt;
		uint8_t  fcp_qos;
		uint8_t  fcp_stream;
		uint8_t  fcp_flag_full : 1;
		uint8_t  fcp_flag_io : 1;
		uint8_t  fcp_quarantined : 1;
		uint8_t  _fcp_flag_unused : 5;
	};
} firehose_chunk_pos_u;
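
/*
 * Layout note (derived from the macros and bit-fields above, assuming the
 * little-endian byte order used on Apple platforms): when fcp_pos is updated
 * as a plain 64-bit integer, the *_INC constants address the corresponding
 * fields of the struct view:
 *
 *   bits  0-15  fcp_next_entry_offs   FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC
 *   bits 16-31  fcp_private_offs      FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC
 *   bits 32-39  fcp_refcnt            FIREHOSE_CHUNK_POS_REFCNT_INC
 *   bits 40-47  fcp_qos
 *   bits 48-55  fcp_stream
 *   bit  56     fcp_flag_full         FIREHOSE_CHUNK_POS_FULL_BIT
 *
 * FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM() reads bits 48-56, i.e. the stream
 * byte together with the full flag, so a full chunk never compares equal to
 * a plain stream value.
 */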

typedef struct firehose_chunk_s {
	uint8_t  fc_start[0];       // zero-length marker: base of the chunk, reservation offsets are relative to it
	firehose_chunk_pos_u fc_pos;
	uint64_t fc_timestamp;
	uint8_t  fc_data[FIREHOSE_CHUNK_SIZE - 8 - 8]; // rest of the 4 KiB chunk after fc_pos and fc_timestamp
} *firehose_chunk_t;

typedef struct firehose_chunk_range_s {
	uint16_t fcr_offset; // offset from the start of the chunk
	uint16_t fcr_length;
} *firehose_chunk_range_t;

#if __has_include(<os/atomic_private.h>)
#if defined(KERNEL) || defined(OS_FIREHOSE_SPI)

OS_ALWAYS_INLINE
static inline bool
firehose_chunk_pos_fits(firehose_chunk_pos_u *pos, uint16_t size)
{
	return pos->fcp_next_entry_offs + size <= pos->fcp_private_offs;
}

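/*
 * Return-value convention of firehose_chunk_tracepoint_try_reserve()
 * (summary derived from the implementation below): a positive value is the
 * offset from fc_start at which the tracepoint was reserved; FAIL (0) means
 * the caller should give up on this chunk (stale position, wrong stream, or
 * another writer will observe the full flag on flush); FAIL_ENQUEUE (-1)
 * means the reservation failed while no writer held a reference, so the
 * caller must enqueue the chunk for flushing.
 */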
#define FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE  (-1)
#define FIREHOSE_CHUNK_TRY_RESERVE_FAIL          ( 0)

#if OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY
OS_ALWAYS_INLINE
static inline long
firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp,
    firehose_stream_t stream, uint8_t qos, uint16_t pubsize,
    uint16_t privsize, uint8_t **privptr)
{
	const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
	firehose_chunk_pos_u orig, pos;
	bool reservation_failed, stamp_delta_fits;

	stamp_delta_fits = ((stamp - fc->fc_timestamp) >> 48) == 0;

	// no acquire barrier because the returned space is only written to
	os_atomic_rmw_loop(&fc->fc_pos.fcp_atomic_pos,
	    orig.fcp_pos, pos.fcp_pos, relaxed, {
		if (orig.fcp_pos == 0) {
			// we acquired a really, really old reference and probably
			// just faulted in a new page
			os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL);
		}
		if (!FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(orig, stream)) {
			// nothing to do if the chunk is full or the stream doesn't match;
			// in that case this thread probably:
			// - loaded the chunk ref,
			// - was suspended for a long while, and
			// - reread the chunk only to find something very old
			os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL);
		}
		pos.fcp_pos = orig.fcp_pos;
		if (!firehose_chunk_pos_fits(&orig,
		    ft_size + pubsize + privsize) || !stamp_delta_fits) {
			pos.fcp_flag_full = true;
			reservation_failed = true;
		} else {
			if (qos > pos.fcp_qos) {
				pos.fcp_qos = qos;
			}
			// we use the *_INC macros so that the compiler generates better
			// assembly: updating the individual struct fields would force it
			// to handle carry propagation, which we know cannot happen here
			pos.fcp_pos += roundup(ft_size + pubsize, 8) *
			    FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC;
			pos.fcp_pos -= privsize * FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC;
			pos.fcp_pos += FIREHOSE_CHUNK_POS_REFCNT_INC;
			const uint16_t minimum_payload_size = 16;
			if (!firehose_chunk_pos_fits(&pos,
			    roundup(ft_size + minimum_payload_size, 8))) {
				// if we can't even fit minimum_payload_size bytes of payload
				// for the next tracepoint, flush right away
				pos.fcp_flag_full = true;
			}
			reservation_failed = false;
		}
	});

	if (reservation_failed) {
		if (pos.fcp_refcnt) {
			// nothing to do: another writer still holds a reference and will
			// pick up the "FULL" flag on flush and push the chunk as a consequence
			return FIREHOSE_CHUNK_TRY_RESERVE_FAIL;
		}
		// caller must enqueue chunk
		return FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE;
	}
	if (privptr) {
		*privptr = (uint8_t *)((uintptr_t)fc->fc_start + pos.fcp_private_offs);
	}
	return orig.fcp_next_entry_offs;
}

OS_ALWAYS_INLINE
static inline firehose_tracepoint_t
firehose_chunk_tracepoint_begin(firehose_chunk_t fc, uint64_t stamp,
    uint16_t pubsize, uint64_t thread_id, long offset)
{
	firehose_tracepoint_t ft = (firehose_tracepoint_t)
	    __builtin_assume_aligned((void *)((uintptr_t)fc->fc_start + (uintptr_t)offset), 8);
	stamp -= fc->fc_timestamp;
	stamp |= (uint64_t)pubsize << 48;
	// The compiler barrier is needed for userland process death handling, see
	// (tracepoint-begin) in libdispatch's firehose_buffer_stream_chunk_install.
	os_atomic_std(atomic_store_explicit)(&ft->ft_atomic_stamp_and_length, stamp,
	    os_atomic_std(memory_order_relaxed));
	__asm__ __volatile__ ("" ::: "memory");
	ft->ft_thread = thread_id;
	return ft;
}

OS_ALWAYS_INLINE
static inline bool
firehose_chunk_tracepoint_end(firehose_chunk_t fc,
    firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
{
	firehose_chunk_pos_u pos;

	os_atomic_std(atomic_store_explicit)(&ft->ft_id.ftid_atomic_value,
	    ftid.ftid_value, os_atomic_std(memory_order_release));
	pos.fcp_pos = os_atomic_std(atomic_fetch_sub_explicit)(&fc->fc_pos.fcp_atomic_pos,
	    FIREHOSE_CHUNK_POS_REFCNT_INC, os_atomic_std(memory_order_relaxed));
	return pos.fcp_refcnt == 1 && pos.fcp_flag_full;
}
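
/*
 * Illustrative sketch only, kept under #if 0 so it is never compiled: one
 * plausible caller sequence for emitting a single tracepoint into a chunk,
 * assuming the caller already holds a reference to `fc` and handles the
 * enqueue and flush paths itself. The helper name, the absence of private
 * data, and the memcpy-based payload copy are hypothetical, not part of the
 * original interface.
 */
#if 0
static bool
firehose_chunk_tracepoint_emit_example(firehose_chunk_t fc, uint64_t stamp,
    firehose_stream_t stream, uint8_t qos, uint64_t thread_id,
    firehose_tracepoint_id_u ftid, const void *pubdata, uint16_t pubsize)
{
	// reserve pubsize bytes of public data and no private data
	long offset = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream,
	    qos, pubsize, 0, NULL);
	if (offset == FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE) {
		// no other writer holds a reference: a real caller would enqueue
		// the chunk for flushing here before giving up
		return false;
	}
	if (offset == FIREHOSE_CHUNK_TRY_RESERVE_FAIL) {
		// wrong stream, stale chunk, or another writer will handle the flush
		return false;
	}
	firehose_tracepoint_t ft = firehose_chunk_tracepoint_begin(fc, stamp,
	    pubsize, thread_id, offset);
	memcpy(ft->ft_data, pubdata, pubsize);
	// true when this was the last writer of a chunk marked full,
	// i.e. the caller should push the chunk for flushing
	return firehose_chunk_tracepoint_end(fc, ft, ftid);
}
#endif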
#endif // OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY

#endif // defined(KERNEL) || defined(OS_FIREHOSE_SPI)
#endif // __has_include(<os/atomic_private.h>)

__END_DECLS

#endif // __FIREHOSE_CHUNK_PRIVATE__