/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef XNU_KERNEL_PRIVATE

#ifndef _KERN_COUNTER_H
#define _KERN_COUNTER_H

/*!
 * @file <kern/counter.h>
 *
 * @brief
 * Module for working with 64-bit relaxed atomic counters.
 *
 * @discussion
 * Different counter types have different speed-memory tradeoffs, but
 * they all share a common interface.
 *
 * Counters can be statically allocated or dynamically allocated.
 *
 * Statically allocated counters are always backed by per-cpu storage, which means
 * writes take place on the current CPU's value and reads sum all of the per-cpu values.
 *
 * Dynamically allocated counters can be either per-cpu or use a single 64-bit value.
 * To create a per-cpu counter, use the scalable_counter_t type. Note that this
 * trades off additional memory for better scalability.
 * To create a single 64-bit counter, use the atomic_counter_t type.
 *
 * For most counters you can just use the counter_t type and the choice of
 * scalable or atomic will be made at compile time based on the target.
 *
 * The counter types are opaque handles. They ARE NOT COPYABLE. If you need
 * to make a copy of a counter, you should do so like this:
 * <code>
 * counter_t original;
 * ...
 * counter_t copy;
 * counter_alloc(&copy);
 * counter_add(&copy, counter_load(&original));
 * ...
 * // Make sure to free them at some point.
 * counter_free(&original);
 * counter_free(&copy);
 * </code>
 *
 * Static counter example:
 * <code>
 * SCALABLE_COUNTER_DEFINE(my_counter);
 * ...
 * counter_inc(&my_counter);
 * assert(counter_load(&my_counter) == 1);
 * </code>
 *
 * Dynamic counter example:
 * <code>
 * scalable_counter_t my_scalable_counter;
 * atomic_counter_t my_atomic_counter;
 * counter_t my_counter;
 *
 * // All three counters share the same interface. So to change the speed-memory
 * // tradeoff just change the type.
 * counter_alloc(&my_scalable_counter);
 * counter_alloc(&my_atomic_counter);
 * counter_alloc(&my_counter);
 *
 * counter_inc(&my_scalable_counter);
 * counter_inc(&my_atomic_counter);
 * counter_inc(&my_counter);
 *
 * assert(counter_load(&my_scalable_counter) == 1);
 * assert(counter_load(&my_atomic_counter) == 1);
 * assert(counter_load(&my_counter) == 1);
 * </code>
 */

#include <mach/mach_types.h>
#include <kern/macro_help.h>
#include <kern/startup.h>
#include <kern/zalloc.h>

typedef __zpercpu uint64_t *scalable_counter_t;
typedef uint64_t atomic_counter_t;
/* Generic counter base type. Does not have an implementation. */
struct generic_counter_t;

/*!
 * @macro SCALABLE_COUNTER_DECLARE
 *
 * @abstract
 * (Optionally) declares a static per-cpu counter (in a header).
 *
 * @param name the name of the counter.
 */
#define SCALABLE_COUNTER_DECLARE(name) \
	extern scalable_counter_t name;

/*!
 * @macro SCALABLE_COUNTER_DEFINE
 *
 * @abstract
 * Defines a static per-cpu counter.
 * The counter can only be accessed after the TUNABLES phase of startup.
 *
 * @param name the name of the counter.
 */
#define SCALABLE_COUNTER_DEFINE(name) \
	__startup_data uint64_t __ ##name##_early_storage = 0; \
	scalable_counter_t name = {&__##name##_early_storage}; \
	STARTUP_ARG(TUNABLES, STARTUP_RANK_MIDDLE, scalable_counter_static_boot_mangle, &name); \
	STARTUP_ARG(PERCPU, STARTUP_RANK_SECOND, scalable_counter_static_init, &name);
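
/*
 * Illustrative usage sketch (`my_subsystem_ops` and the surrounding file
 * names are hypothetical): declare the counter in a shared header with
 * SCALABLE_COUNTER_DECLARE and define it in exactly one translation unit
 * with SCALABLE_COUNTER_DEFINE.
 * <code>
 * // my_subsystem.h
 * SCALABLE_COUNTER_DECLARE(my_subsystem_ops);
 *
 * // my_subsystem.c
 * SCALABLE_COUNTER_DEFINE(my_subsystem_ops);
 *
 * void
 * my_subsystem_do_op(void)
 * {
 *         counter_inc(&my_subsystem_ops);
 * }
 * </code>
 */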

/*
 * Initialize a counter.
 * May block and will never fail.
 * The counter must be freed with counter_free.
 */
OS_OVERLOADABLE
extern void counter_alloc(struct generic_counter_t *);

OS_OVERLOADABLE
extern void counter_free(struct generic_counter_t *);
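
/*
 * Illustrative lifecycle sketch (the `stats` structure and its field
 * names are hypothetical):
 * <code>
 * struct my_stats {
 *         scalable_counter_t faults;
 * };
 * ...
 * counter_alloc(&stats->faults);  // never fails, but may block
 * counter_inc(&stats->faults);
 * ...
 * counter_free(&stats->faults);   // releases the per-cpu storage
 * </code>
 */
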
/*
 * Add amount to counter.
 * @param amount: The amount to add.
 */
OS_OVERLOADABLE
extern void counter_add(struct generic_counter_t *, uint64_t amount);

/*
 * Add 1 to this counter.
 */
OS_OVERLOADABLE
extern void counter_inc(struct generic_counter_t *);

/*
 * Subtract 1 from this counter.
 */
OS_OVERLOADABLE
extern void counter_dec(struct generic_counter_t *);

/* Variants of the above operations where the caller takes responsibility for disabling preemption. */
OS_OVERLOADABLE
extern void counter_add_preemption_disabled(struct generic_counter_t *, uint64_t amount);
OS_OVERLOADABLE
extern void counter_inc_preemption_disabled(struct generic_counter_t *);
OS_OVERLOADABLE
extern void counter_dec_preemption_disabled(struct generic_counter_t *);
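
/*
 * Illustrative sketch (assumes the usual kernel disable_preemption()/
 * enable_preemption() routines; `net_rx_packets`, `net_rx_bytes` and
 * `pkt_len` are hypothetical):
 * <code>
 * disable_preemption();
 * // Batch several updates inside one preemption-disabled window
 * // instead of paying to disable/enable preemption in every call.
 * counter_inc_preemption_disabled(&net_rx_packets);
 * counter_add_preemption_disabled(&net_rx_bytes, (uint64_t)pkt_len);
 * enable_preemption();
 * </code>
 */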

/*
 * Read the value of the counter.
 * Note that for per-cpu counters this will cause synchronization (a sum)
 * of all the sharded values.
 */
OS_OVERLOADABLE
extern uint64_t counter_load(struct generic_counter_t *);
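
/*
 * Illustrative sketch (`my_counter` is a hypothetical counter): because a
 * load may touch every per-cpu shard, prefer reading on slow paths such as
 * stats reporting rather than on hot paths.
 * <code>
 * uint64_t snapshot = counter_load(&my_counter);
 * printf("ops so far: %llu\n", snapshot);
 * </code>
 */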

#pragma mark implementation details
/* NB: Nothing below here should be used directly. */

__startup_func void scalable_counter_static_boot_mangle(scalable_counter_t *counter);
__startup_func void scalable_counter_static_init(scalable_counter_t *counter);

#if XNU_TARGET_OS_WATCH || XNU_TARGET_OS_TV
#define ATOMIC_COUNTER_USE_PERCPU 0
#else
#define ATOMIC_COUNTER_USE_PERCPU 1
#endif /* XNU_TARGET_OS_WATCH || XNU_TARGET_OS_TV */

#if ATOMIC_COUNTER_USE_PERCPU
typedef scalable_counter_t counter_t;
#else
typedef atomic_counter_t counter_t;
#endif /* ATOMIC_COUNTER_USE_PERCPU */

#define COUNTER_MAKE_PROTOTYPES(counter_t) \
	OS_OVERLOADABLE \
	extern void counter_alloc(counter_t *); \
	\
	OS_OVERLOADABLE \
	extern void counter_free(counter_t *); \
	\
	OS_OVERLOADABLE \
	extern void counter_add(counter_t *, uint64_t amount); \
	\
	OS_OVERLOADABLE \
	extern void counter_inc(counter_t *); \
	\
	OS_OVERLOADABLE \
	extern void counter_dec(counter_t *); \
	\
	OS_OVERLOADABLE \
	extern void counter_add_preemption_disabled(counter_t *, uint64_t amount); \
	\
	OS_OVERLOADABLE \
	extern void counter_inc_preemption_disabled(counter_t *); \
	\
	OS_OVERLOADABLE \
	extern void counter_dec_preemption_disabled(counter_t *); \
	\
	OS_OVERLOADABLE \
	extern uint64_t counter_load(counter_t *);

COUNTER_MAKE_PROTOTYPES(scalable_counter_t);
COUNTER_MAKE_PROTOTYPES(atomic_counter_t);

#endif /* _KERN_COUNTER_H */

#endif /* XNU_KERNEL_PRIVATE */