/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 #ifdef XNU_KERNEL_PRIVATE
30 #ifndef _KERN_COUNTER_H
31 #define _KERN_COUNTER_H
34 * @file <kern/counter.h>
37 * Module for working with 64bit relaxed atomic counters.
40 * Different counter types have different speed-memory tradeoffs, but
41 * they all share a common interface.
43 * Counters can be statically allocated or dynamically allocated.
45 * Statically allocated counters are always backed by per-cpu storage which means
46 * writes take place on the current CPUs value and reads sum all of the per-cpu values.
48 * Dynamically allocated counters can be either per-cpu or use a single 64bit value.
49 * To create a per-cpu counter, use the scalable_counter_t type. Note that this
50 * trades of additional memory for better scalability.
51 * To create a single 64bit counter, use the atomic_counter_t type.
53 * For most counters you can just use the counter_t type and the choice of
54 * scalable or atomic will be made at compile time based on the target.
56 * The counter types are opaque handles. They ARE NOT COPYABLE. If you need
57 * to make a copy of a counter, you should do so like this:
62 * counter_alloc(©);
63 * counter_add(©, counter_load(&original));
65 * // Make sure to free them at some point.
66 * counter_free(&original);
67 * counter_free(©);
70 * Static counter example:
72 * SCALABLE_COUNTER_DEFINE(my_counter);
74 * counter_inc(&my_counter);
75 * assert(counter_load(&my_counter) == 1);
78 * Dynamic Counter Example:
80 * scalable_counter_t my_percpu_counter;
81 * atomic_counter_t my_atomic_counter;
82 * counter_t my_counter;
84 * // All three counters share the same interface. So to change the speed-memory
85 * // tradeoff just change the type.
86 * counter_init(&my_scalable_counter);
87 * counter_init(&my_atomic_counter);
88 * counter_init(&my_counter);
90 * counter_inc(&my_scalable_counter);
91 * counter_inc(&my_atomic_counter);
92 * counter_inc(&my_counter);
94 * assert(counter_load(&my_scalable_counter) == 1);
95 * assert(counter_load(&my_atomic_counter) == 1);
96 * assert(counter_load(&my_counter) == 1);
100 #include <mach/mach_types.h>
101 #include <kern/macro_help.h>
102 #include <kern/startup.h>
103 #include <kern/zalloc.h>
105 typedef __zpercpu
uint64_t *scalable_counter_t
;
106 typedef uint64_t atomic_counter_t
;
107 /* Generic counter base type. Does not have an implementation. */
108 struct generic_counter_t
;
/*!
 * @macro SCALABLE_COUNTER_DECLARE
 *
 * @abstract
 * (optionally) declares a static per-cpu counter (in a header).
 *
 * @param name the name of the counter.
 */
#define SCALABLE_COUNTER_DECLARE(name) \
	extern scalable_counter_t name;
/*!
 * @macro SCALABLE_COUNTER_DEFINE
 *
 * @abstract
 * Defines a static per-cpu counter.
 * Counter can only be accessed after the TUNABLES phase of startup.
 *
 * @param name the name of the counter.
 */
#define SCALABLE_COUNTER_DEFINE(name)                                                       \
	__startup_data uint64_t __ ##name##_early_storage = 0;                              \
	scalable_counter_t name = {&__##name##_early_storage};                              \
	STARTUP_ARG(TUNABLES, STARTUP_RANK_MIDDLE, scalable_counter_static_boot_mangle, &name); \
	STARTUP_ARG(PERCPU, STARTUP_RANK_SECOND, scalable_counter_static_init, &name);
#if CONFIG_DOXYGEN
/*
 * NOTE(review): these generic prototypes document the common counter
 * interface only; the compiler-visible, type-specific prototypes are
 * generated by COUNTER_MAKE_PROTOTYPES below. Declaring both for real
 * would produce conflicting declarations in C, so these are gated to
 * documentation builds — confirm CONFIG_DOXYGEN matches the build setup.
 */

/*!
 * @function counter_alloc
 *
 * @abstract
 * Initialize a per-cpu counter.
 *
 * @discussion
 * May block and will never fail.
 * This counter must be freed with counter_free.
 */
extern void counter_alloc(struct generic_counter_t *);

/*!
 * @function counter_free
 *
 * @abstract
 * Free a counter previously initialized with counter_alloc.
 */
extern void counter_free(struct generic_counter_t *);

/*!
 * @function counter_add
 *
 * @abstract
 * Add amount to counter.
 *
 * @param amount The amount to add.
 */
extern void counter_add(struct generic_counter_t *, uint64_t amount);

/*!
 * @function counter_inc
 *
 * @abstract
 * Add 1 to this counter.
 */
extern void counter_inc(struct generic_counter_t *);

/*!
 * @function counter_dec
 *
 * @abstract
 * Subtract 1 from this counter.
 */
extern void counter_dec(struct generic_counter_t *);

/* Variants of the above operations where the caller takes responsibility for disabling preemption. */

/*!
 * @function counter_add_preemption_disabled
 *
 * @abstract
 * Add amount to counter; the caller must have preemption disabled.
 */
extern void counter_add_preemption_disabled(struct generic_counter_t *, uint64_t amount);

/*!
 * @function counter_inc_preemption_disabled
 *
 * @abstract
 * Add 1 to this counter; the caller must have preemption disabled.
 */
extern void counter_inc_preemption_disabled(struct generic_counter_t *);

/*!
 * @function counter_dec_preemption_disabled
 *
 * @abstract
 * Subtract 1 from this counter; the caller must have preemption disabled.
 */
extern void counter_dec_preemption_disabled(struct generic_counter_t *);

/*!
 * @function counter_load
 *
 * @abstract
 * Read the value of the percpu counter.
 *
 * @discussion
 * Note that this will cause synchronization of all the sharded values.
 */
extern uint64_t counter_load(struct generic_counter_t *);
#endif /* CONFIG_DOXYGEN */
180 #pragma mark implementation details
181 /* NB: Nothing below here should be used directly. */
183 __startup_func
void scalable_counter_static_boot_mangle(scalable_counter_t
*counter
);
184 __startup_func
void scalable_counter_static_init(scalable_counter_t
*counter
);
186 #if XNU_TARGET_OS_WATCH || XNU_TARGET_OS_TV
187 #define ATOMIC_COUNTER_USE_PERCPU 0
189 #define ATOMIC_COUNTER_USE_PERCPU 1
190 #endif /* XNU_TARGET_OS_OSX */
192 #if ATOMIC_COUNTER_USE_PERCPU
193 typedef scalable_counter_t counter_t
;
195 typedef atomic_counter_t counter_t
;
196 #endif /* ATOMIC_COUNTER_USE_PERCPU */
198 #define COUNTER_MAKE_PROTOTYPES(counter_t) \
200 extern void counter_alloc(counter_t *); \
203 extern void counter_free(counter_t *); \
206 extern void counter_add(counter_t *, uint64_t amount); \
209 extern void counter_inc(counter_t *); \
212 extern void counter_dec(counter_t *); \
215 extern void counter_add_preemption_disabled(counter_t *, uint64_t amount); \
218 extern void counter_inc_preemption_disabled(counter_t *); \
221 extern void counter_dec_preemption_disabled(counter_t *); \
224 extern uint64_t counter_load(counter_t *);
226 COUNTER_MAKE_PROTOTYPES(scalable_counter_t
);
227 COUNTER_MAKE_PROTOTYPES(atomic_counter_t
);
229 #endif /* _KERN_COUNTER_H */
231 #endif /* XNU_KERNEL_PRIVATE */