/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/counter.h>
#include <kern/zalloc.h>
#include <machine/atomic.h>
#include <machine/machine_routines.h>
#include <machine/cpu_number.h>

SECURITY_READ_ONLY_LATE(zone_t) counters_zone;
ZONE_INIT(&counters_zone, "per_cpu_counters", sizeof(uint64_t),
    ZC_PERCPU | ZC_ALIGNMENT_REQUIRED, ZONE_ID_ANY, NULL);

/*
 * Tracks how many static scalable counters are in use since they won't show up
 * in the per_cpu_counters zone stats.
 */
uint64_t num_static_scalable_counters;

/*
 * Mangle the given scalable_counter_t so that it points to the early storage
 * regardless of which CPU # we boot on.
 * Must be run before we go multi-core.
 */
__startup_func void
scalable_counter_static_boot_mangle(scalable_counter_t *counter)
{
	*counter = __zpcpu_mangle_for_boot(*counter);
}

/*
 * Initializes a static counter in permanent per-cpu memory.
 * Run during startup for each static per-cpu counter.
 * Must be run before we go multi-core.
 */
__startup_func void
scalable_counter_static_init(scalable_counter_t *counter)
{
	/*
	 * We pointed the counter to a single global value during early boot.
	 * Grab that value now. We'll store it in our current CPU's value.
	 */
	uint64_t current_value = os_atomic_load_wide(zpercpu_get(*counter), relaxed);
	/*
	 * This counter can't be freed, so we allocate it out of the permanent
	 * zone rather than our counter zone.
	 */
	*counter = zalloc_percpu_permanent(sizeof(uint64_t), ZALIGN_64);
	os_atomic_store_wide(zpercpu_get(*counter), current_value, relaxed);
	num_static_scalable_counters++;
}

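/*
 * Illustrative sketch (not part of this file): how a static scalable counter
 * is expected to reach the two startup hooks above. The macro name and the
 * expansion described below are assumptions for illustration; see
 * <kern/counter.h> for the authoritative definition.
 *
 *   // Definition site in some subsystem (hypothetical counter name):
 *   SCALABLE_COUNTER_DEFINE(my_subsystem_events);
 *
 *   // Conceptually this provides:
 *   //  1. a __startup_data uint64_t acting as single-slot early storage,
 *   //  2. an early STARTUP hook that runs
 *   //     scalable_counter_static_boot_mangle() so the percpu pointer
 *   //     resolves to that storage on the boot CPU, and
 *   //  3. a later STARTUP hook that runs scalable_counter_static_init()
 *   //     once zalloc is up, migrating the accumulated early value into
 *   //     permanent percpu memory.
 */
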
OS_OVERLOADABLE
void
counter_alloc(scalable_counter_t *counter)
{
	*counter = zalloc_percpu(counters_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
}

OS_OVERLOADABLE
void
counter_alloc(atomic_counter_t *counter)
{
	os_atomic_store_wide(counter, 0, relaxed);
}

OS_OVERLOADABLE
void
counter_free(scalable_counter_t *counter)
{
	zfree_percpu(counters_zone, *counter);
}

OS_OVERLOADABLE
void
counter_free(atomic_counter_t *counter)
{
	/* Atomic counters are stored inline; there is nothing to free. */
	(void)counter;
}

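/*
 * Illustrative sketch (not part of this file): lifecycle of dynamically
 * allocated counters. The scalable counter_add()/counter_inc() overloads are
 * assumed to be provided inline by <kern/counter.h>; only the atomic
 * overloads live in this file. Names below are hypothetical.
 *
 *   scalable_counter_t rx_bytes;
 *   counter_alloc(&rx_bytes);         // Z_WAITOK | Z_ZERO | Z_NOFAIL: cannot fail
 *   counter_add(&rx_bytes, len);      // contention-free per-CPU add
 *   uint64_t total = counter_load(&rx_bytes); // sums every CPU's slot
 *   counter_free(&rx_bytes);          // returns percpu storage to counters_zone
 *
 *   atomic_counter_t drops;
 *   counter_alloc(&drops);            // just zeroes the word in place
 *   counter_inc(&drops);
 *   counter_free(&drops);             // no-op; storage is inline
 */
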
OS_OVERLOADABLE
void
counter_add(atomic_counter_t *counter, uint64_t amount)
{
	os_atomic_add(counter, amount, relaxed);
}

OS_OVERLOADABLE
void
counter_inc(atomic_counter_t *counter)
{
	os_atomic_inc(counter, relaxed);
}

OS_OVERLOADABLE
void
counter_dec(atomic_counter_t *counter)
{
	os_atomic_dec(counter, relaxed);
}

/*
 * For atomic counters, the preemption-disabled variants behave exactly like
 * their regular counterparts; atomic RMW operations don't depend on the
 * caller's preemption state.
 */
OS_OVERLOADABLE
void
counter_add_preemption_disabled(atomic_counter_t *counter, uint64_t amount)
{
	counter_add(counter, amount);
}

OS_OVERLOADABLE
void
counter_inc_preemption_disabled(atomic_counter_t *counter)
{
	counter_inc(counter);
}

OS_OVERLOADABLE
void
counter_dec_preemption_disabled(atomic_counter_t *counter)
{
	counter_dec(counter);
}

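/*
 * Illustrative sketch (not part of this file): the _preemption_disabled
 * variants exist so that callers already running with preemption off can,
 * for scalable counters, presumably skip a redundant disable/enable when the
 * per-CPU slot is selected. Hypothetical usage:
 *
 *   disable_preemption();
 *   counter_inc_preemption_disabled(&my_counter);
 *   enable_preemption();
 */
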
OS_OVERLOADABLE
uint64_t
counter_load(atomic_counter_t *counter)
{
	return os_atomic_load_wide(counter, relaxed);
}

OS_OVERLOADABLE
uint64_t
counter_load(scalable_counter_t *counter)
{
	uint64_t value = 0;
	zpercpu_foreach(it, *counter) {
		value += os_atomic_load_wide(it, relaxed);
	}
	return value;
}
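
/*
 * Illustrative note (not part of this file): because each per-CPU value is
 * loaded independently, counter_load() on a scalable counter returns a sum
 * that is only a point-in-time approximation under concurrent updates, not a
 * consistent snapshot. For example, with per-CPU values {5, 7} the result is
 * 12, but increments racing with the loop may or may not be included.
 */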