/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/counter.h>
#include <kern/zalloc.h>
#include <machine/atomic.h>
#include <machine/machine_routines.h>
#include <machine/cpu_number.h>
OS_OVERLOADABLE
void
counter_add(scalable_counter_t *counter, uint64_t amount)
{
	os_atomic_add(zpercpu_get(*counter), amount, relaxed);
}
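
/*
 * Usage sketch (not part of the original file): a subsystem would
 * typically allocate a scalable counter once at init and then update it
 * from hot paths. counter_alloc() is assumed here from <kern/counter.h>,
 * and the fault_stats_* names are illustrative.
 */
#if 0 /* illustrative only */
static scalable_counter_t total_faults;

static void
fault_stats_init(void)
{
	/* Reserves a uint64_t slot per CPU for this counter. */
	counter_alloc(&total_faults);
}

static void
fault_stats_record(uint64_t nfaults)
{
	/* Relaxed atomic add on the calling CPU's slot. */
	counter_add(&total_faults, nfaults);
}
#endif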
OS_OVERLOADABLE
void
counter_inc(scalable_counter_t *counter)
{
	os_atomic_inc(zpercpu_get(*counter), relaxed);
}
OS_OVERLOADABLE
void
counter_dec(scalable_counter_t *counter)
{
	os_atomic_dec(zpercpu_get(*counter), relaxed);
}
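
/*
 * Read-side sketch (not part of the original file): because each CPU
 * updates only its own slot, reading a scalable counter means summing
 * every per-CPU slot. The zpercpu_foreach() iterator and
 * os_atomic_load_wide() are assumed here from <kern/zalloc.h> and
 * <machine/atomic.h>; counter_load_sketch is an illustrative name, not
 * this file's API.
 */
#if 0 /* illustrative only */
static uint64_t
counter_load_sketch(scalable_counter_t *counter)
{
	uint64_t value = 0;

	/* Accumulate each CPU's privately-updated slot. */
	zpercpu_foreach(cpu_slot, *counter) {
		value += os_atomic_load_wide(cpu_slot, relaxed);
	}
	return value;
}
#endif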
/*
 * NB: On arm, the preemption disabled implementation is the same as
 * the normal implementation. Otherwise we would need to enforce that
 * callers never mix the interfaces for the same counter. (See the
 * illustrative sketch at the end of this file.)
 */
OS_OVERLOADABLE
void
counter_add_preemption_disabled(scalable_counter_t *counter, uint64_t amount)
{
	counter_add(counter, amount);
}
OS_OVERLOADABLE
void
counter_inc_preemption_disabled(scalable_counter_t *counter)
{
	counter_inc(counter);
}
OS_OVERLOADABLE
void
counter_dec_preemption_disabled(scalable_counter_t *counter)
{
	counter_dec(counter);
}
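
/*
 * Why two interfaces exist at all (illustrative, hedged): on an
 * architecture whose fast path is a plain, non-atomic update of the
 * local CPU's slot, the thread must not migrate mid-update. There, the
 * normal interface would bracket the store with disable_preemption() /
 * enable_preemption(), and the *_preemption_disabled variants let
 * callers that already run with preemption off skip that cost. The
 * sketch below shows that shape; it is not XNU's x86 implementation.
 * On arm, the relaxed atomic RMW above is safe across migration, so
 * both interfaces collapse into one.
 */
#if 0 /* illustrative only */
static void
counter_add_sketch(scalable_counter_t *counter, uint64_t amount)
{
	disable_preemption();
	/* Exclusive owner of this slot while preemption is off. */
	*zpercpu_get(*counter) += amount;
	enable_preemption();
}
#endif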