diff --git a/libkern/firehose/chunk_private.h b/libkern/firehose/chunk_private.h
index b4fbcd74a0c748d30325c4a2c0395b5b48d872c1..ac3fbe92efdf81560c39bbb7b8bcdabe39835852 100644
--- a/libkern/firehose/chunk_private.h
+++ b/libkern/firehose/chunk_private.h
 #ifndef __FIREHOSE_CHUNK_PRIVATE__
 #define __FIREHOSE_CHUNK_PRIVATE__
 
-#if KERNEL
-#include <machine/atomic.h>
-#endif
-#include <stdatomic.h>
 #include <sys/param.h>
-#include <os/base.h>
 #include "firehose_types_private.h"
 #include "tracepoint_private.h"
 
@@ -39,10 +34,10 @@ __BEGIN_DECLS
 #define FIREHOSE_CHUNK_POS_REFCNT_INC           (1ULL << 32)
 #define FIREHOSE_CHUNK_POS_FULL_BIT             (1ULL << 56)
 #define FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(pos, stream) \
-               ((((pos).fcp_pos >> 48) & 0x1ff) == (uint16_t)stream)
+               ((((pos).fcp_pos >> 48) & 0x1ff) == (uint16_t)stream)
 
 typedef union {
-       _Atomic(uint64_t) fcp_atomic_pos;
+       os_atomic(uint64_t) fcp_atomic_pos;
        uint64_t fcp_pos;
        struct {
                uint16_t fcp_next_entry_offs;
@@ -73,19 +68,20 @@ typedef struct firehose_chunk_range_s {
 
 OS_ALWAYS_INLINE
 static inline bool
-firehose_chunk_pos_fits(firehose_chunk_pos_u pos, uint16_t size)
+firehose_chunk_pos_fits(firehose_chunk_pos_u *pos, uint16_t size)
 {
-       return pos.fcp_next_entry_offs + size <= pos.fcp_private_offs;
+       return pos->fcp_next_entry_offs + size <= pos->fcp_private_offs;
 }
 
 #define FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE  (-1)
 #define FIREHOSE_CHUNK_TRY_RESERVE_FAIL          ( 0)
 
+#if OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY
 OS_ALWAYS_INLINE
 static inline long
 firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp,
-               firehose_stream_t stream, uint8_t qos, uint16_t pubsize,
-               uint16_t privsize, uint8_t **privptr)
+    firehose_stream_t stream, uint8_t qos, uint16_t pubsize,
+    uint16_t privsize, uint8_t **privptr)
 {
        const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
        firehose_chunk_pos_u orig, pos;
@@ -95,42 +91,44 @@ firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp,
 
        // no acquire barrier because the returned space is written to only
        os_atomic_rmw_loop(&fc->fc_pos.fcp_atomic_pos,
-                       orig.fcp_pos, pos.fcp_pos, relaxed, {
+           orig.fcp_pos, pos.fcp_pos, relaxed, {
                if (orig.fcp_pos == 0) {
-                       // we acquired a really really old reference, and we probably
-                       // just faulted in a new page
-                       os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL);
+                       // we acquired a really really old reference, and we probably
+                       // just faulted in a new page
+                       os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL);
                }
                if (!FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(orig, stream)) {
-                       // nothing to do if the chunk is full, or the stream doesn't match,
-                       // in which case the thread probably:
-                       // - loaded the chunk ref
-                       // - been suspended a long while
-                       // - read the chunk to find a very old thing
-                       os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL);
+                       // nothing to do if the chunk is full, or the stream doesn't match,
+                       // in which case the thread probably:
+                       // - loaded the chunk ref
+                       // - been suspended a long while
+                       // - read the chunk to find a very old thing
+                       os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL);
                }
-               pos = orig;
-               if (!firehose_chunk_pos_fits(orig,
-                               ft_size + pubsize + privsize) || !stamp_delta_fits) {
-                       pos.fcp_flag_full = true;
-                       reservation_failed = true;
+               pos.fcp_pos = orig.fcp_pos;
+               if (!firehose_chunk_pos_fits(&orig,
+               ft_size + pubsize + privsize) || !stamp_delta_fits) {
+                       pos.fcp_flag_full = true;
+                       reservation_failed = true;
                } else {
-                       if (qos > pos.fcp_qos) pos.fcp_qos = qos;
-                       // using these *_INC macros is so that the compiler generates better
-                       // assembly: using the struct individual fields forces the compiler
-                       // to handle carry propagations, and we know it won't happen
-                       pos.fcp_pos += roundup(ft_size + pubsize, 8) *
-                                       FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC;
-                       pos.fcp_pos -= privsize * FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC;
-                       pos.fcp_pos += FIREHOSE_CHUNK_POS_REFCNT_INC;
-                       const uint16_t minimum_payload_size = 16;
-                       if (!firehose_chunk_pos_fits(pos,
-                                       roundup(ft_size + minimum_payload_size , 8))) {
-                               // if we can't even have minimum_payload_size bytes of payload
-                               // for the next tracepoint, just flush right away
-                               pos.fcp_flag_full = true;
+                       if (qos > pos.fcp_qos) {
+                               pos.fcp_qos = qos;
                        }
-                       reservation_failed = false;
+                       // using these *_INC macros is so that the compiler generates better
+                       // assembly: using the struct individual fields forces the compiler
+                       // to handle carry propagations, and we know it won't happen
+                       pos.fcp_pos += roundup(ft_size + pubsize, 8) *
+                       FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC;
+                       pos.fcp_pos -= privsize * FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC;
+                       pos.fcp_pos += FIREHOSE_CHUNK_POS_REFCNT_INC;
+                       const uint16_t minimum_payload_size = 16;
+                       if (!firehose_chunk_pos_fits(&pos,
+                       roundup(ft_size + minimum_payload_size, 8))) {
+                               // if we can't even have minimum_payload_size bytes of payload
+                               // for the next tracepoint, just flush right away
+                               pos.fcp_flag_full = true;
+                       }
+                       reservation_failed = false;
                }
        });
 
@@ -144,7 +142,7 @@ firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp,
                return FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE;
        }
        if (privptr) {
-               *privptr = fc->fc_start + pos.fcp_private_offs;
+               *privptr = (uint8_t *)((uintptr_t)fc->fc_start + pos.fcp_private_offs);
        }
        return orig.fcp_next_entry_offs;
 }
@@ -152,17 +150,17 @@ firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp,
 OS_ALWAYS_INLINE
 static inline firehose_tracepoint_t
 firehose_chunk_tracepoint_begin(firehose_chunk_t fc, uint64_t stamp,
-               uint16_t pubsize, uint64_t thread_id, long offset)
+    uint16_t pubsize, uint64_t thread_id, long offset)
 {
        firehose_tracepoint_t ft = (firehose_tracepoint_t)
-                       __builtin_assume_aligned(fc->fc_start + offset, 8);
+           __builtin_assume_aligned((void *)((uintptr_t)fc->fc_start + (uintptr_t)offset), 8);
        stamp -= fc->fc_timestamp;
        stamp |= (uint64_t)pubsize << 48;
        // The compiler barrier is needed for userland process death handling, see
        // (tracepoint-begin) in libdispatch's firehose_buffer_stream_chunk_install.
-       atomic_store_explicit(&ft->ft_atomic_stamp_and_length, stamp,
-                       memory_order_relaxed);
-       __asm__ __volatile__("" ::: "memory");
+       os_atomic_std(atomic_store_explicit)(&ft->ft_atomic_stamp_and_length, stamp,
+           os_atomic_std(memory_order_relaxed));
+       __asm__ __volatile__ ("" ::: "memory");
        ft->ft_thread = thread_id;
        return ft;
 }
@@ -170,16 +168,17 @@ firehose_chunk_tracepoint_begin(firehose_chunk_t fc, uint64_t stamp,
 OS_ALWAYS_INLINE
 static inline bool
 firehose_chunk_tracepoint_end(firehose_chunk_t fc,
-               firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
+    firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
 {
        firehose_chunk_pos_u pos;
 
-       atomic_store_explicit(&ft->ft_id.ftid_atomic_value,
-                       ftid.ftid_value, memory_order_release);
-       pos.fcp_pos = atomic_fetch_sub_explicit(&fc->fc_pos.fcp_atomic_pos,
-                       FIREHOSE_CHUNK_POS_REFCNT_INC, memory_order_relaxed);
+       os_atomic_std(atomic_store_explicit)(&ft->ft_id.ftid_atomic_value,
+           ftid.ftid_value, os_atomic_std(memory_order_release));
+       pos.fcp_pos = os_atomic_std(atomic_fetch_sub_explicit)(&fc->fc_pos.fcp_atomic_pos,
+           FIREHOSE_CHUNK_POS_REFCNT_INC, os_atomic_std(memory_order_relaxed));
        return pos.fcp_refcnt == 1 && pos.fcp_flag_full;
 }
+#endif // OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY
 
 #endif // defined(KERNEL) || defined(OS_FIREHOSE_SPI)
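
Usage sketch (illustrative only, not part of the diff above): the three inline helpers changed here are meant to be composed by a caller roughly as follows. The wrapper name example_emit_tracepoint, the include path, and the qos/privsize choices are assumptions made for this example, not xnu code.

	#include <string.h>                       // memcpy
	#include <firehose/chunk_private.h>       // this header (include path assumed)

	// Reserve space in a chunk, fill in the tracepoint, then publish it.
	static bool
	example_emit_tracepoint(firehose_chunk_t fc, uint64_t stamp,
	    firehose_stream_t stream, uint64_t thread_id,
	    firehose_tracepoint_id_u ftid, const void *pub, uint16_t pubsize)
	{
		uint8_t *privptr = NULL;

		// Atomically carve out header + payload space in the chunk.
		long offset = firehose_chunk_tracepoint_try_reserve(fc, stamp,
		    stream, /* qos */ 0, pubsize, /* privsize */ 0, &privptr);
		if (offset <= 0) {
			// FIREHOSE_CHUNK_TRY_RESERVE_FAIL (0): stale chunk or wrong
			// stream; FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE (-1): the
			// chunk just filled up and should be enqueued for flushing.
			return false;
		}

		// Write the stamp/length/thread header, then the public payload.
		firehose_tracepoint_t ft = firehose_chunk_tracepoint_begin(fc,
		    stamp, pubsize, thread_id, offset);
		memcpy(ft->ft_data, pub, pubsize);

		// Publishing the id drops the reservation refcount; a true return
		// means this caller saw the chunk both full and idle, so it is
		// responsible for flushing it.
		return firehose_chunk_tracepoint_end(fc, ft, ftid);
	}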