/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/counter.h>
#include <kern/zalloc.h>
#include <machine/atomic.h>
#include <machine/machine_routines.h>
#include <machine/cpu_number.h>
SECURITY_READ_ONLY_LATE(zone_t) counters_zone;
ZONE_INIT(&counters_zone, "per_cpu_counters", sizeof(uint64_t),
    ZC_PERCPU | ZC_ALIGNMENT_REQUIRED, ZONE_ID_ANY, NULL);
/*
 * Tracks how many static scalable counters are in use since they won't show up
 * in the per_cpu_counters zone stats.
 */
uint64_t num_static_scalable_counters;
/*
 * Mangle the given scalable_counter_t so that it points to the early storage
 * regardless of which CPU # we boot on.
 * Must be run before we go multi-core.
 */
__startup_func void
scalable_counter_static_boot_mangle(scalable_counter_t *counter)
{
	*counter = __zpcpu_mangle_for_boot(*counter);
}
/*
 * Initializes a static counter in permanent per-cpu memory.
 * Run during startup for each static per-cpu counter.
 * Must be run before we go multi-core.
 */
__startup_func void
scalable_counter_static_init(scalable_counter_t *counter)
{
	/*
	 * We pointed the counter to a single global value during early boot.
	 * Grab that value now. We'll store it in our current CPU's value.
	 */
	uint64_t current_value = os_atomic_load_wide(zpercpu_get(*counter), relaxed);
	/*
	 * This counter can't be freed, so we allocate it out of the permanent
	 * zone rather than the per-cpu counters zone.
	 */
	*counter = zalloc_percpu_permanent(sizeof(uint64_t), ZALIGN_64);
	os_atomic_store_wide(zpercpu_get(*counter), current_value, relaxed);
	num_static_scalable_counters++;
}
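
/*
 * Illustrative sketch (not part of the original source): how the two startup
 * hooks above might be driven for a statically defined counter. The variable
 * and function names below are hypothetical; in practice the early storage
 * and the two calls are assumed to be generated and registered by the
 * static-counter macros in <kern/counter.h> and the startup subsystem.
 */
#if 0
static uint64_t my_early_storage;       /* single shared slot used before per-cpu memory exists */
static scalable_counter_t my_static_counter =
    (scalable_counter_t)&my_early_storage;

static void
my_static_counter_bringup(void)
{
	/* 1. Early boot, single core: make zpercpu_get() resolve to the early slot. */
	scalable_counter_static_boot_mangle(&my_static_counter);
	/* ...the counter can be updated while only the boot CPU is running... */
	/*
	 * 2. Later in startup, still single core: migrate the accumulated value
	 *    into permanent per-cpu memory.
	 */
	scalable_counter_static_init(&my_static_counter);
}
#endif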
OS_OVERLOADABLE
void
counter_alloc(scalable_counter_t *counter)
{
	*counter = zalloc_percpu(counters_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
}
OS_OVERLOADABLE
void
counter_alloc(atomic_counter_t *counter)
{
	os_atomic_store_wide(counter, 0, relaxed);
}
OS_OVERLOADABLE
void
counter_free(scalable_counter_t *counter)
{
	zfree_percpu(counters_zone, *counter);
}
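
/*
 * Illustrative sketch (not part of the original source): typical lifecycle of
 * a dynamically allocated scalable counter using the overloads defined in this
 * file. counter_inc()/counter_add() for scalable counters are assumed to be
 * provided by <kern/counter.h>; the function and variable names are
 * hypothetical.
 */
#if 0
static void
example_scalable_counter_usage(void)
{
	scalable_counter_t ctr;

	counter_alloc(&ctr);              /* per-CPU storage from counters_zone, zeroed */
	counter_inc(&ctr);                /* hot path: updates only the local CPU's slot */
	counter_add(&ctr, 41);
	assert(counter_load(&ctr) == 42); /* cold path: sums every CPU's slot */
	counter_free(&ctr);
}
#endif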
OS_OVERLOADABLE
void
counter_free(atomic_counter_t *counter)
{
	(void)counter; /* nothing to free for an atomic counter */
}
OS_OVERLOADABLE
void
counter_add(atomic_counter_t *counter, uint64_t amount)
{
	os_atomic_add(counter, amount, relaxed);
}
OS_OVERLOADABLE
void
counter_inc(atomic_counter_t *counter)
{
	os_atomic_inc(counter, relaxed);
}
OS_OVERLOADABLE
void
counter_dec(atomic_counter_t *counter)
{
	os_atomic_dec(counter, relaxed);
}
/*
 * The _preemption_disabled variants are identical to the regular ones for
 * atomic counters; they exist so callers that already run with preemption
 * disabled can use the same API for both counter implementations.
 */
OS_OVERLOADABLE
void
counter_add_preemption_disabled(atomic_counter_t *counter, uint64_t amount)
{
	counter_add(counter, amount);
}
OS_OVERLOADABLE
void
counter_inc_preemption_disabled(atomic_counter_t *counter)
{
	counter_inc(counter);
}
OS_OVERLOADABLE
void
counter_dec_preemption_disabled(atomic_counter_t *counter)
{
	counter_dec(counter);
}
OS_OVERLOADABLE
uint64_t
counter_load(atomic_counter_t *counter)
{
	return os_atomic_load_wide(counter, relaxed);
}
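
/*
 * Illustrative sketch (not part of the original source): the atomic_counter_t
 * overloads share the same call pattern as the scalable variant, but every
 * update contends on a single shared word. The function name below is
 * hypothetical.
 */
#if 0
static void
example_atomic_counter_usage(void)
{
	atomic_counter_t ctr;

	counter_alloc(&ctr);              /* just stores 0; no zone allocation */
	counter_inc(&ctr);                /* relaxed atomic RMW on the shared word */
	counter_add(&ctr, 41);
	assert(counter_load(&ctr) == 42);
	counter_free(&ctr);               /* no-op for atomic counters */
}
#endif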
/*
 * Sums the per-CPU values. The loads are relaxed and not atomic as a group,
 * so the result is only approximate if the counter is concurrently updated.
 */
OS_OVERLOADABLE
uint64_t
counter_load(scalable_counter_t *counter)
{
	uint64_t value = 0;
	zpercpu_foreach(it, *counter) {
		value += os_atomic_load_wide(it, relaxed);
	}
	return value;
}