/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *	File:		arm/commpage/commpage.c
 *	Purpose:	Set up and export a RO/RW page
 */
#include <libkern/section_keywords.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <ipc/ipc_port.h>
#include <arm/cpuid.h>		/* for cpuid_info() & cache_info() */
#include <arm/rtclock.h>
#include <libkern/OSAtomic.h>
#include <stdatomic.h>

#include <sys/kdebug.h>

#include <atm/atm_internal.h>
static void	commpage_init_cpu_capabilities( void );
static int	commpage_cpus( void );

SECURITY_READ_ONLY_LATE(vm_address_t)	commPagePtr = 0;
SECURITY_READ_ONLY_LATE(vm_address_t)	sharedpage_rw_addr = 0;
SECURITY_READ_ONLY_LATE(uint32_t)	_cpu_capabilities = 0;

/* For sysctl access from BSD side */
extern int	gARMv81Atomics;
extern int	gARMv8Crc32;
/*
 * Initialize the commpage at boot: map it, stamp the version, and fill in
 * the static fields exported to userspace.
 */
void
commpage_populate(void)
{
	uint16_t	c2 = 0;
	int		cpufamily;

	sharedpage_rw_addr = pmap_create_sharedpage();
	commPagePtr = (vm_address_t) _COMM_PAGE_BASE_ADDRESS;

	*((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

	commpage_init_cpu_capabilities();
	commpage_set_timestamp(0, 0, 0, 0, 0);

	if (_cpu_capabilities & kCache32)
		c2 = 32;
	else if (_cpu_capabilities & kCache64)
		c2 = 64;
	else if (_cpu_capabilities & kCache128)
		c2 = 128;

	*((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2;
	*((uint32_t*)(_COMM_PAGE_SPIN_COUNT + _COMM_PAGE_RW_OFFSET)) = 1;
	commpage_update_active_cpus();
	cpufamily = cpuid_get_cpufamily();

	/* machine_info valid after ml_get_max_cpus() */
	*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
	*((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
	*((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
	*((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t) cpufamily;
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RW_OFFSET)) = (uint32_t) PE_i_can_has_debugger(NULL);
	*((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_allowed();
	*((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = user_cont_hwclock_allowed();
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
	/* enforce 16KB alignment for watch targets with new ABI */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#else /* __arm64__ */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
#endif /* __arm64__ */
	commpage_update_timebase();
	commpage_update_mach_continuous_time(0);

	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

	/*
	 * Set the commpage approximate time to zero for initialization.
	 * The scheduler will populate the correct value before running a user thread.
	 */
	*((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1;
#else
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0;
#endif

	commpage_update_kdebug_state();

	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
}
/* Parameters for division via multiply-and-shift ("magic number" division). */
struct mu {
	uint64_t	m;	// magic number
	int32_t		a;	// add indicator
	int32_t		s;	// shift amount
};
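
/*
 * Illustrative sketch (not from this file): a "magic number" divide replaces
 * n / d with a multiply and a shift. For example, for 32-bit division by 10
 * the magic multiplier is 0xCCCCCCCD with a shift of 3; the add indicator
 * covers divisors whose multiplier does not fit in the word size.
 */
#if 0	/* example only; udiv10_by_magic is hypothetical */
static uint32_t
udiv10_by_magic(uint32_t n)
{
	const uint64_t m = 0xCCCCCCCDULL;	/* magic multiplier for d == 10 */
	return (uint32_t)((n * m) >> 32) >> 3;	/* equals n / 10 for all 32-bit n */
}
#endif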
/*
 * Publish a new timeofday snapshot to the commpage.
 */
void
commpage_set_timestamp(
	uint64_t	tbr,
	uint64_t	secs,
	uint64_t	frac,
	uint64_t	scale,
	uint64_t	tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	if (commPagePtr == 0)
		return;

	commpage_timeofday_datap = (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET);

	/* Zero the tick first to mark the record as being updated */
	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;
#if	(__ARM_ARCH__ >= 7)
	__asm__ volatile("dmb ish");	/* make the zeroed tick visible before the fields change */
#endif
	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

#if	(__ARM_ARCH__ >= 7)
	__asm__ volatile("dmb ish");	/* make the fields visible before the tick validates them */
#endif
	commpage_timeofday_datap->TimeStamp_tick = tbr;
}
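
/*
 * Illustrative sketch (not from this file): a userspace reader pairs with
 * the barriers above seqlock-style. TimeStamp_tick is zero while the record
 * is inconsistent, so a reader retries until it sees the same nonzero tick
 * before and after copying the fields.
 */
#if 0	/* example only; example_read_timeofday is hypothetical */
static void
example_read_timeofday(const volatile new_commpage_timeofday_data_t *p,
    new_commpage_timeofday_data_t *snap)
{
	do {
		snap->TimeStamp_tick = p->TimeStamp_tick;
		__asm__ volatile("dmb ish");	/* order the tick load before the field loads */
		snap->TimeStamp_sec = p->TimeStamp_sec;
		snap->TimeStamp_frac = p->TimeStamp_frac;
		snap->Ticks_scale = p->Ticks_scale;
		snap->Ticks_per_sec = p->Ticks_per_sec;
		__asm__ volatile("dmb ish");	/* order the field loads before the re-check */
	} while (snap->TimeStamp_tick == 0 ||
	    snap->TimeStamp_tick != p->TimeStamp_tick);
}
#endif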
/*
 *	Update _COMM_PAGE_MEMORY_PRESSURE. Called periodically from vm's compute_memory_pressure().
 */
void
commpage_set_memory_pressure(
	unsigned int	pressure )
{
	if (commPagePtr == 0)
		return;
	*((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure;
}
/*
 *	Update _COMM_PAGE_SPIN_COUNT. We might want to reduce this when running on battery, etc.
 */
void
commpage_set_spin_count(
	unsigned int	count )
{
	if (count == 0)		/* we test for 0 after decrement, not before */
		count = 1;

	if (commPagePtr == 0)
		return;
	*((uint32_t *)(_COMM_PAGE_SPIN_COUNT + _COMM_PAGE_RW_OFFSET)) = count;
}
/*
 *	Determine the number of CPUs on this system.
 */
static int
commpage_cpus( void )
{
	int cpus;

	cpus = ml_get_max_cpus();	// NB: this call can block

	if (cpus == 0)
		panic("commpage cpus==0");
	if (cpus > 0xFF)
		cpus = 0xFF;	/* the count is packed into an 8-bit commpage field */

	return cpus;
}
vm_address_t
_get_commpage_priv_address(void)
{
	return sharedpage_rw_addr;
}
/*
 *	Initialize the _cpu_capabilities vector.
 */
static void
commpage_init_cpu_capabilities( void )
{
	uint32_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	switch (cpu_info.cache_line_size) {
	case 128:
		bits |= kCache128;
		break;
	case 64:
		bits |= kCache64;
		break;
	case 32:
		bits |= kCache32;
		break;
	default:
		break;
	}
	cpus = commpage_cpus();

	if (cpus == 1)
		bits |= kUP;

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;	// TPIDRURO for TLS

	arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
	if (mvfp_info->neon_hpfp)
		bits |= kHasNeonHPFP;
	if (mvfp_info->neon_fp16)
		bits |= kHasNeonFP16;

#if defined(__arm64__)
	bits |= kHasFMA;
#endif
#if __ARM_ENABLE_WFE_
#ifdef __arm64__
	if (arm64_wfe_allowed()) {
		bits |= kHasEvent;
	}
#else
	bits |= kHasEvent;
#endif
#endif
#if __ARM_V8_CRYPTO_EXTENSIONS__
	bits |= kHasARMv8Crypto;
#endif
#ifdef __arm64__
	uint64_t isar0 = __builtin_arm_rsr64("ID_AA64ISAR0_EL1");
	if ((isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) == ID_AA64ISAR0_EL1_ATOMIC_8_1) {
		bits |= kHasARMv81Atomics;
		gARMv81Atomics = 1;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_CRC32_MASK) == ID_AA64ISAR0_EL1_CRC32_EN) {
		bits |= kHasARMv8Crc32;
		gARMv8Crc32 = 1;
	}
#endif

	_cpu_capabilities = bits;

	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;
}
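
/*
 * Illustrative sketch (not from this file): userspace can read the copy
 * exported at _COMM_PAGE_CPU_CAPABILITIES and branch on feature bits, e.g.
 * to select an LSE-atomics code path.
 */
#if 0	/* example only; example_has_armv81_atomics is hypothetical */
static int
example_has_armv81_atomics(void)
{
	uint32_t caps = *(volatile uint32_t *)_COMM_PAGE_CPU_CAPABILITIES;
	return (caps & kHasARMv81Atomics) != 0;
}
#endif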
/*
 *	Updated every time a logical CPU goes offline/online.
 */
void
commpage_update_active_cpus(void)
{
	if (!commPagePtr)
		return;
	*((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = processor_avail_count;
}
/*
 *	Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace).
 */
void
commpage_update_timebase(void)
{
	if (commPagePtr) {
		*((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
	}
}
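
/*
 * Illustrative sketch (assumption, not from this file): with the offset
 * exported above, a userspace mach_absolute_time() can avoid a syscall by
 * adding it to the hardware counter, roughly:
 */
#if 0	/* example only; example_mach_absolute_time is hypothetical */
static uint64_t
example_mach_absolute_time(void)
{
	uint64_t cntvct;
	__asm__ volatile("isb\n\tmrs %0, CNTVCT_EL0" : "=r"(cntvct));
	return cntvct + *(volatile uint64_t *)(_COMM_PAGE_TIMEBASE_OFFSET);
}
#endif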
/*
 * Update the commpage with the current kdebug state. This currently has bits for
 * global trace state and typefilter enablement. It is likely that additional state
 * will be tracked in the future.
 *
 * INVARIANT: This value will always be 0 if global tracing is disabled. This
 * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }".
 */
void
commpage_update_kdebug_state(void)
{
	if (commPagePtr)
		*((volatile uint32_t*)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = kdebug_commpage_state();
}
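
/*
 * Illustrative sketch (not from this file): the invariant above keeps the
 * userspace fast path to a single load:
 */
#if 0	/* example only */
	if (*(volatile uint32_t *)_COMM_PAGE_KDEBUG_ENABLE) {
		/* build and emit a trace event; skipped entirely when tracing is off */
	}
#endif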
/* Ditto for atm_diagnostic_config. */
void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
{
	if (commPagePtr)
		*((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config;
}
/*
 * Update the commpage data with the state of multiuser mode for
 * this device, allowing various services in userspace to avoid
 * IPC in the (more common) non-multiuser environment.
 */
void
commpage_update_multiuser_config(uint32_t multiuser_config)
{
	if (commPagePtr)
		*((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config;
}
/*
 * Update the commpage data for the last known value of mach_absolute_time().
 */
void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	uintptr_t approx_time_base = (uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);
	uint64_t saved_data;

	if (commPagePtr) {
		saved_data = atomic_load_explicit((_Atomic uint64_t *)approx_time_base,
				memory_order_relaxed);
		if (saved_data < abstime) {
			/* ignore the success/fail return value, assuming that
			 * if the value has been updated since we last read it,
			 * "someone" has a newer timestamp than us and ours is
			 * now invalid */
			atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)approx_time_base,
					&saved_data, abstime, memory_order_relaxed, memory_order_relaxed);
		}
	}
#else
#pragma unused (abstime)
#endif
}
/*
 * Update the commpage data's total system sleep time for
 * userspace calls to mach_continuous_time().
 */
void
commpage_update_mach_continuous_time(uint64_t sleeptime)
{
	if (commPagePtr) {
#ifdef __arm64__
		*((uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = sleeptime;
#else
		uint64_t *c_time_base = (uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
		uint64_t old;

		/* no atomic 64-bit store on arm32, so publish via compare-and-swap */
		do {
			old = *c_time_base;
		} while (!OSCompareAndSwap64(old, sleeptime, c_time_base));
#endif /* __arm64__ */
	}
}
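
/*
 * Illustrative sketch (assumption, not from this file): mach_continuous_time()
 * keeps counting across sleep, so userspace can derive it by adding the
 * exported sleep-time base to mach_absolute_time(), roughly:
 */
#if 0	/* example only; example_mach_continuous_time is hypothetical */
static uint64_t
example_mach_continuous_time(void)
{
	return mach_absolute_time() +
	    *(volatile uint64_t *)(_COMM_PAGE_CONT_TIMEBASE);
}
#endif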
/*
 * Update the commpage's value for the boot time.
 */
void
commpage_update_boottime(uint64_t value)
{
	if (commPagePtr) {
#ifdef __arm64__
		*((uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET)) = value;
#else
		uint64_t *cp = (uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
		uint64_t old_value;

		do {
			old_value = *cp;
		} while (!OSCompareAndSwap64(old_value, value, cp));
#endif /* __arm64__ */
	}
}
/*
 * After this counter has incremented, all running CPUs are guaranteed to
 * have quiesced, i.e. executed serially dependent memory barriers.
 * This is only tracked for CPUs running in userspace, and is therefore only
 * useful outside the kernel.
 *
 * Note that you can't know which side of those barriers your read was from,
 * so you have to observe 2 increments in order to ensure that you saw a
 * serially dependent barrier chain across all running CPUs.
 */
uint64_t
commpage_increment_cpu_quiescent_counter(void)
{
	if (!commPagePtr)
		return 0;

	uint64_t old_gen;

	_Atomic uint64_t *sched_gen = (_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER +
	        _COMM_PAGE_RW_OFFSET);
	/*
	 * On 32-bit architectures, double-wide atomic loads or stores are a CAS,
	 * so the atomic increment is the most efficient way to increment the
	 * counter.
	 *
	 * On 64-bit architectures, however, because the update is synchronized by
	 * the cpu mask, relaxed loads and stores are more efficient.
	 */
#if __LP64__
	old_gen = atomic_load_explicit(sched_gen, memory_order_relaxed);
	atomic_store_explicit(sched_gen, old_gen + 1, memory_order_relaxed);
#else
	old_gen = atomic_fetch_add_explicit(sched_gen, 1, memory_order_relaxed);
#endif
	return old_gen;
}