2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the License
11 * may not be used to create, or enable the creation or redistribution of,
12 * unlawful or unlicensed copies of an Apple operating system, or to
13 * circumvent, violate, or enable the circumvention or violation of, any
14 * terms of an Apple operating system software license agreement.
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
19 * The Original Code and all software distributed under the License are
20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24 * Please see the License for the specific language governing rights and
25 * limitations under the License.
27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
33 * @APPLE_FREE_COPYRIGHT@
36 * File: arm/commpage/commpage.c
37 * Purpose: Set up and export a RO/RW page
39 #include <libkern/section_keywords.h>
40 #include <mach/mach_types.h>
41 #include <mach/machine.h>
42 #include <mach/vm_map.h>
43 #include <machine/cpu_capabilities.h>
44 #include <machine/commpage.h>
45 #include <machine/config.h>
46 #include <machine/pmap.h>
47 #include <vm/vm_kern.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_protos.h>
50 #include <ipc/ipc_port.h>
51 #include <arm/cpuid.h> /* for cpuid_info() & cache_info() */
52 #include <arm/rtclock.h>
53 #include <libkern/OSAtomic.h>
54 #include <stdatomic.h>
55 #include <kern/remote_time.h>
56 #include <machine/machine_remote_time.h>
57 #include <machine/machine_routines.h>
59 #include <sys/kdebug.h>
62 #include <atm/atm_internal.h>
/* Forward declarations for this file's static helpers. */
65 static int commpage_cpus( void );
68 static void commpage_init_cpu_capabilities( void );
/*
 * Kernel virtual addresses of the commpage mappings; written once during
 * boot (SECURITY_READ_ONLY_LATE marks them read-only after lockdown).
 */
70 SECURITY_READ_ONLY_LATE(vm_address_t
) commPagePtr
= 0;
71 SECURITY_READ_ONLY_LATE(vm_address_t
) sharedpage_rw_addr
= 0;
/* Cached CPU capability bits; also mirrored into the commpage itself. */
72 SECURITY_READ_ONLY_LATE(uint64_t) _cpu_capabilities
= 0;
73 SECURITY_READ_ONLY_LATE(vm_address_t
) sharedpage_rw_text_addr
= 0;
/* User-space addresses of the commpage text page (defined elsewhere). */
75 extern user64_addr_t commpage_text64_location
;
76 extern user32_addr_t commpage_text32_location
;
78 /* For sysctl access from BSD side */
79 extern int gARMv81Atomics
;
80 extern int gARMv8Crc32
;
81 extern int gARMv82FHM
;
82 extern int gARMv82SHA512
;
83 extern int gARMv82SHA3
;
/*
 * commpage_populate: create and fill in the data commpage at boot.
 * NOTE(review): this view of the file is garbled — the embedded original
 * line numbers are non-contiguous, so some statements (opening brace,
 * #if guards, local declarations such as c2/cpufamily/secs) are missing
 * here. Verify against the upstream file before editing.
 */
86 commpage_populate(void)
91 // Create the data and the text commpage
92 vm_map_address_t kernel_data_addr
, kernel_text_addr
, user_text_addr
;
93 pmap_create_sharedpages(&kernel_data_addr
, &kernel_text_addr
, &user_text_addr
);
/* Record the kernel-side RW addresses and the fixed user base address. */
95 sharedpage_rw_addr
= kernel_data_addr
;
96 sharedpage_rw_text_addr
= kernel_text_addr
;
97 commPagePtr
= (vm_address_t
) _COMM_PAGE_BASE_ADDRESS
;
/* Stamp the signature string so user space can sanity-check the page. */
100 commpage_text64_location
= user_text_addr
;
101 bcopy(_COMM_PAGE64_SIGNATURE_STRING
, (void *)(_COMM_PAGE_SIGNATURE
+ _COMM_PAGE_RW_OFFSET
),
102 MIN(_COMM_PAGE_SIGNATURELEN
, strlen(_COMM_PAGE64_SIGNATURE_STRING
)));
104 commpage_text32_location
= user_text_addr
;
105 bcopy(_COMM_PAGE32_SIGNATURE_STRING
, (void *)(_COMM_PAGE_SIGNATURE
+ _COMM_PAGE_RW_OFFSET
),
106 MIN(_COMM_PAGE_SIGNATURELEN
, strlen(_COMM_PAGE32_SIGNATURE_STRING
)));
109 *((uint16_t*)(_COMM_PAGE_VERSION
+ _COMM_PAGE_RW_OFFSET
)) = (uint16_t) _COMM_PAGE_THIS_VERSION
;
111 commpage_init_cpu_capabilities();
112 commpage_set_timestamp(0, 0, 0, 0, 0);
/*
 * Select the cache line size to advertise from the capability bits.
 * NOTE(review): the branch bodies (assignments to c2) are missing here.
 */
114 if (_cpu_capabilities
& kCache32
) {
116 } else if (_cpu_capabilities
& kCache64
) {
118 } else if (_cpu_capabilities
& kCache128
) {
124 *((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE
+ _COMM_PAGE_RW_OFFSET
)) = c2
;
126 commpage_update_active_cpus();
127 cpufamily
= cpuid_get_cpufamily();
/* Export machine topology and memory size for user-space readers. */
129 *((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS
+ _COMM_PAGE_RW_OFFSET
)) = (uint8_t) machine_info
.physical_cpu_max
;
130 *((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS
+ _COMM_PAGE_RW_OFFSET
)) = (uint8_t) machine_info
.logical_cpu_max
;
131 *((uint64_t*)(_COMM_PAGE_MEMORY_SIZE
+ _COMM_PAGE_RW_OFFSET
)) = machine_info
.max_mem
;
132 *((uint32_t*)(_COMM_PAGE_CPUFAMILY
+ _COMM_PAGE_RW_OFFSET
)) = (uint32_t)cpufamily
;
133 *((uint32_t*)(_COMM_PAGE_DEV_FIRM
+ _COMM_PAGE_RW_OFFSET
)) = (uint32_t)PE_i_can_has_debugger(NULL
);
134 *((uint8_t*)(_COMM_PAGE_USER_TIMEBASE
+ _COMM_PAGE_RW_OFFSET
)) = user_timebase_type();
135 *((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK
+ _COMM_PAGE_RW_OFFSET
)) = (uint8_t)user_cont_hwclock_allowed();
136 *((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT
+ _COMM_PAGE_RW_OFFSET
)) = (uint8_t) page_shift
;
/*
 * Per-target user page shifts.
 * NOTE(review): the opening #if of this conditional block is missing
 * from this view; only the #elif/#else/#endif lines survive.
 */
139 *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32
+ _COMM_PAGE_RW_OFFSET
)) = (uint8_t) page_shift_user32
;
140 *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64
+ _COMM_PAGE_RW_OFFSET
)) = (uint8_t) SIXTEENK_PAGE_SHIFT
;
141 #elif (__ARM_ARCH_7K__ >= 2)
142 /* enforce 16KB alignment for watch targets with new ABI */
143 *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32
+ _COMM_PAGE_RW_OFFSET
)) = (uint8_t) SIXTEENK_PAGE_SHIFT
;
144 *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64
+ _COMM_PAGE_RW_OFFSET
)) = (uint8_t) SIXTEENK_PAGE_SHIFT
;
145 #else /* __arm64__ */
146 *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32
+ _COMM_PAGE_RW_OFFSET
)) = (uint8_t) PAGE_SHIFT
;
147 *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64
+ _COMM_PAGE_RW_OFFSET
)) = (uint8_t) PAGE_SHIFT
;
148 #endif /* __arm64__ */
150 commpage_update_timebase();
151 commpage_update_mach_continuous_time(0);
/* Publish the boot time in microseconds. */
154 clock_usec_t microsecs
;
/* NOTE(review): "µsecs" below looks like a mis-encoded "&microsecs" —
 * confirm against the upstream source. */
155 clock_get_boottime_microtime(&secs
, µsecs
);
156 commpage_update_boottime(secs
* USEC_PER_SEC
+ microsecs
);
159 * set commpage approximate time to zero for initialization.
160 * scheduler shall populate correct value before running user thread
162 *((uint64_t *)(_COMM_PAGE_APPROX_TIME
+ _COMM_PAGE_RW_OFFSET
)) = 0;
163 #ifdef CONFIG_MACH_APPROXIMATE_TIME
164 *((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED
+ _COMM_PAGE_RW_OFFSET
)) = 1;
166 *((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED
+ _COMM_PAGE_RW_OFFSET
)) = 0;
169 commpage_update_kdebug_state();
172 commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
/* Seed the remote-time params with the reset sentinel timestamp. */
176 *((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS
+ _COMM_PAGE_RW_OFFSET
)) = BT_RESET_SENTINEL_TS
;
/* Mach-O segment/section holding the ARM preemption-free-zone (PFZ) code. */
179 #define COMMPAGE_TEXT_SEGMENT "__TEXT_EXEC"
180 #define COMMPAGE_TEXT_SECTION "__commpage_text"
182 /* Get a pointer to the start of the ARM PFZ code section. This macro tell the
183 * linker that the storage for the variable here is at the start of the section */
184 extern char commpage_text_start
[]
185 __SECTION_START_SYM(COMMPAGE_TEXT_SEGMENT
, COMMPAGE_TEXT_SECTION
);
187 /* Get a pointer to the end of the ARM PFZ code section. This macro tell the
188 * linker that the storage for the variable here is at the end of the section */
189 extern char commpage_text_end
[]
190 __SECTION_END_SYM(COMMPAGE_TEXT_SEGMENT
, COMMPAGE_TEXT_SECTION
);
192 /* This is defined in the commpage text section as a symbol at the start of the preemptible
194 extern char commpage_text_preemptible_functions
;
/* Byte size of the PFZ half of the text section; set by commpage_text_populate(). */
197 static size_t size_of_pfz
= 0;
200 /* This is the opcode for brk #666 */
201 #define BRK_666_OPCODE 0xD4205340
/*
 * commpage_text_populate: copy the PFZ code section into the commpage text
 * page and pad the remainder with brk #666 so stray jumps trap.
 * NOTE(review): the embedded line numbering is non-contiguous here; some
 * lines (return type, braces) are missing from this view.
 */
204 commpage_text_populate(void)
207 size_t size_of_commpage_text
= commpage_text_end
- commpage_text_start
;
/* An empty section means the linker did not emit the PFZ code at all. */
208 if (size_of_commpage_text
== 0) {
209 panic("ARM comm page text section %s,%s missing", COMMPAGE_TEXT_SEGMENT
, COMMPAGE_TEXT_SECTION
);
/* The whole section must fit within a single page. */
211 assert(size_of_commpage_text
<= PAGE_SIZE
);
212 assert(size_of_commpage_text
> 0);
214 /* Get the size of the PFZ half of the comm page text section. */
215 size_of_pfz
= &commpage_text_preemptible_functions
- commpage_text_start
;
217 // Copy the code segment of comm page text section into the PFZ
218 memcpy((void *) _COMM_PAGE64_TEXT_START_ADDRESS
, (void *) commpage_text_start
, size_of_commpage_text
);
220 // Make sure to populate the rest of it with brk 666 so that undefined code
222 memset((char *) _COMM_PAGE64_TEXT_START_ADDRESS
+ size_of_commpage_text
, BRK_666_OPCODE
,
223 PAGE_SIZE
- size_of_commpage_text
);
/*
 * commpage_is_in_pfz64: test whether a 64-bit user address falls inside the
 * preemption-free zone of the commpage text page.
 * NOTE(review): the return statements and surrounding #if guards are
 * missing from this view of the file.
 */
228 commpage_is_in_pfz64(addr64_t addr64
)
231 if ((addr64
>= commpage_text64_location
) &&
232 (addr64
< (commpage_text64_location
+ size_of_pfz
))) {
238 #pragma unused (addr64)
/*
 * commpage_set_timestamp: publish a new gettimeofday snapshot.
 * The update follows a seqlock-like protocol visible below: TimeStamp_tick
 * is zeroed first, a barrier is issued, the payload fields are written,
 * another barrier is issued, then TimeStamp_tick is written last so readers
 * can detect a torn update.
 * NOTE(review): several parameter declarations and braces are missing from
 * this view (only tick_per_sec survives in the signature).
 */
245 commpage_set_timestamp(
250 uint64_t tick_per_sec
)
252 new_commpage_timeofday_data_t
*commpage_timeofday_datap
;
/* Nothing to do before the commpage exists. */
254 if (commPagePtr
== 0) {
258 commpage_timeofday_datap
= (new_commpage_timeofday_data_t
*)(_COMM_PAGE_NEWTIMEOFDAY_DATA
+ _COMM_PAGE_RW_OFFSET
);
/* Invalidate the record while it is being updated. */
260 commpage_timeofday_datap
->TimeStamp_tick
= 0x0ULL
;
262 #if (__ARM_ARCH__ >= 7)
263 __asm__
volatile ("dmb ish");
265 commpage_timeofday_datap
->TimeStamp_sec
= secs
;
266 commpage_timeofday_datap
->TimeStamp_frac
= frac
;
267 commpage_timeofday_datap
->Ticks_scale
= scale
;
268 commpage_timeofday_datap
->Ticks_per_sec
= tick_per_sec
;
270 #if (__ARM_ARCH__ >= 7)
271 __asm__
volatile ("dmb ish");
/* Writing the tick last re-validates the record for readers. */
273 commpage_timeofday_datap
->TimeStamp_tick
= tbr
;
278 * Update _COMM_PAGE_MEMORY_PRESSURE. Called periodically from vm's compute_memory_pressure()
/* No-op until the commpage has been created. */
282 commpage_set_memory_pressure(
283 unsigned int pressure
)
285 if (commPagePtr
== 0) {
288 *((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE
+ _COMM_PAGE_RW_OFFSET
)) = pressure
;
292 * Determine number of CPUs on this system.
/*
 * NOTE(review): the return statement and the zero-check conditional are
 * missing from this view; only the read of machine_info.max_cpus and the
 * panic survive.
 */
295 commpage_cpus( void )
299 cpus
= machine_info
.max_cpus
;
302 panic("commpage cpus==0");
/* Accessor for the cached CPU capability bits. */
312 _get_cpu_capabilities(void)
314 return _cpu_capabilities
;
/* Accessor for the kernel-side (writable) address of the data commpage. */
318 _get_commpage_priv_address(void)
320 return sharedpage_rw_addr
;
/* Accessor for the kernel-side (writable) address of the text commpage. */
324 _get_commpage_text_priv_address(void)
326 return sharedpage_rw_text_addr
;
330 * Initialize _cpu_capabilities vector
/*
 * Builds the capability bit vector from cache geometry, CPU count, VFP/NEON
 * info and (on arm64) the ID_AA64ISAR0_EL1 feature register, then stores it
 * in _cpu_capabilities and mirrors it into the commpage.
 * NOTE(review): the "bits" declaration, switch cases, and several #if/#endif
 * lines are missing from this view of the file.
 */
333 commpage_init_cpu_capabilities( void )
337 ml_cpu_info_t cpu_info
;
340 ml_cpu_get_info(&cpu_info
);
/* Translate the cache line size into a kCache* bit (cases missing here). */
342 switch (cpu_info
.cache_line_size
) {
/* Encode the CPU count into the capability word. */
355 cpus
= commpage_cpus();
361 bits
|= (cpus
<< kNumCPUsShift
);
363 bits
|= kFastThreadLocalStorage
; // TPIDRURO for TLS
/* Advertise NEON features reported by the floating-point unit. */
367 arm_mvfp_info_t
*mvfp_info
= arm_mvfp_info();
368 if (mvfp_info
->neon
) {
371 if (mvfp_info
->neon_hpfp
) {
372 bits
|= kHasNeonHPFP
;
374 if (mvfp_info
->neon_fp16
) {
375 bits
|= kHasNeonFP16
;
378 #if defined(__arm64__)
381 #if __ARM_ENABLE_WFE_
384 #if __ARM_V8_CRYPTO_EXTENSIONS__
385 bits
|= kHasARMv8Crypto
;
/* Probe optional ISA extensions via the AArch64 ISA feature register. */
388 uint64_t isar0
= __builtin_arm_rsr64("ID_AA64ISAR0_EL1");
389 if ((isar0
& ID_AA64ISAR0_EL1_ATOMIC_MASK
) == ID_AA64ISAR0_EL1_ATOMIC_8_1
) {
390 bits
|= kHasARMv81Atomics
;
393 if ((isar0
& ID_AA64ISAR0_EL1_CRC32_MASK
) == ID_AA64ISAR0_EL1_CRC32_EN
) {
394 bits
|= kHasARMv8Crc32
;
397 if ((isar0
& ID_AA64ISAR0_EL1_FHM_MASK
) >= ID_AA64ISAR0_EL1_FHM_8_2
) {
398 bits
|= kHasARMv82FHM
;
/* SHA2 field strictly greater than "enabled" implies SHA512 support. */
402 if ((isar0
& ID_AA64ISAR0_EL1_SHA2_MASK
) > ID_AA64ISAR0_EL1_SHA2_EN
) {
403 bits
|= kHasARMv82SHA512
;
406 if ((isar0
& ID_AA64ISAR0_EL1_SHA3_MASK
) >= ID_AA64ISAR0_EL1_SHA3_EN
) {
407 bits
|= kHasARMv82SHA3
;
/* Cache the result and publish both 32- and 64-bit views to user space. */
416 _cpu_capabilities
= bits
;
418 *((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES
+ _COMM_PAGE_RW_OFFSET
)) = (uint32_t)_cpu_capabilities
;
419 *((uint64_t *)(_COMM_PAGE_CPU_CAPABILITIES64
+ _COMM_PAGE_RW_OFFSET
)) = _cpu_capabilities
;
423 * Updated every time a logical CPU goes offline/online
/* Publishes processor_avail_count (truncated to 8 bits) to the commpage. */
426 commpage_update_active_cpus(void)
431 *((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS
+ _COMM_PAGE_RW_OFFSET
)) = (uint8_t)processor_avail_count
;
436 * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
/* Publishes the rtclock base offset used by user-space time conversion. */
439 commpage_update_timebase(void)
442 *((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET
+ _COMM_PAGE_RW_OFFSET
)) = rtclock_base_abstime
;
447 * Update the commpage with current kdebug state. This currently has bits for
448 * global trace state, and typefilter enablement. It is likely additional state
449 * will be tracked in the future.
451 * INVARIANT: This value will always be 0 if global tracing is disabled. This
452 * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }"
/* Volatile store: the value is read concurrently from user space. */
455 commpage_update_kdebug_state(void)
458 *((volatile uint32_t*)(_COMM_PAGE_KDEBUG_ENABLE
+ _COMM_PAGE_RW_OFFSET
)) = kdebug_commpage_state();
462 /* Ditto for atm_diagnostic_config */
/* Publishes the ATM diagnostic configuration word to the commpage. */
464 commpage_update_atm_diagnostic_config(uint32_t diagnostic_config
)
467 *((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG
+ _COMM_PAGE_RW_OFFSET
)) = diagnostic_config
;
472 * Update the commpage data with the state of multiuser mode for
473 * this device. Allowing various services in userspace to avoid
474 * IPC in the (more common) non-multiuser environment.
/* Publishes the multiuser configuration word to the commpage. */
477 commpage_update_multiuser_config(uint32_t multiuser_config
)
480 *((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG
+ _COMM_PAGE_RW_OFFSET
)) = multiuser_config
;
485 * update the commpage data for
486 * last known value of mach_absolute_time()
/*
 * Monotonically advances the approximate-time slot: only stores abstime if
 * it is newer than the currently published value. A lost CAS race is benign
 * (someone else published a newer timestamp).
 * NOTE(review): the declaration of saved_data and some braces/#else lines
 * are missing from this view of the file.
 */
490 commpage_update_mach_approximate_time(uint64_t abstime
)
492 #ifdef CONFIG_MACH_APPROXIMATE_TIME
493 uintptr_t approx_time_base
= (uintptr_t)(_COMM_PAGE_APPROX_TIME
+ _COMM_PAGE_RW_OFFSET
);
497 saved_data
= atomic_load_explicit((_Atomic
uint64_t *)approx_time_base
,
498 memory_order_relaxed
);
499 if (saved_data
< abstime
) {
500 /* ignoring the success/fail return value assuming that
501 * if the value has been updated since we last read it,
502 * "someone" has a newer timestamp than us and ours is
504 atomic_compare_exchange_strong_explicit((_Atomic
uint64_t *)approx_time_base
,
505 &saved_data
, abstime
, memory_order_relaxed
, memory_order_relaxed
);
509 #pragma unused (abstime)
514 * update the commpage data's total system sleep time for
515 * userspace call to mach_continuous_time()
/*
 * Two code paths are visible below: a plain 64-bit store, and a
 * compare-and-swap loop (the latter guarded by the trailing
 * #endif / __arm64__). NOTE(review): the #if separating the paths, the
 * declaration of "old", and the do-loop header are missing from this view.
 */
518 commpage_update_mach_continuous_time(uint64_t sleeptime
)
522 *((uint64_t *)(_COMM_PAGE_CONT_TIMEBASE
+ _COMM_PAGE_RW_OFFSET
)) = sleeptime
;
524 uint64_t *c_time_base
= (uint64_t *)(_COMM_PAGE_CONT_TIMEBASE
+ _COMM_PAGE_RW_OFFSET
);
/* Retry until the CAS publishes sleeptime atomically. */
528 } while (!OSCompareAndSwap64(old
, sleeptime
, c_time_base
));
529 #endif /* __arm64__ */
/* Publishes the hardware continuous-time base offset to the commpage. */
534 commpage_update_mach_continuous_time_hw_offset(uint64_t offset
)
536 *((uint64_t *)(_COMM_PAGE_CONT_HW_TIMEBASE
+ _COMM_PAGE_RW_OFFSET
)) = offset
;
540 * update the commpage's value for the boot time
/*
 * Two code paths are visible below: a plain 64-bit store, and a
 * compare-and-swap loop guarded by the trailing #endif / __arm64__.
 * NOTE(review): the #if separating the paths, the declaration of
 * "old_value", and the do-loop header are missing from this view.
 */
543 commpage_update_boottime(uint64_t value
)
547 *((uint64_t *)(_COMM_PAGE_BOOTTIME_USEC
+ _COMM_PAGE_RW_OFFSET
)) = value
;
549 uint64_t *cp
= (uint64_t *)(_COMM_PAGE_BOOTTIME_USEC
+ _COMM_PAGE_RW_OFFSET
);
/* Retry until the CAS publishes the new boot time atomically. */
553 } while (!OSCompareAndSwap64(old_value
, value
, cp
));
554 #endif /* __arm64__ */
559 * set the commpage's remote time params for
560 * userspace call to mach_bridge_remote_time()
/*
 * Publishes the bridge-time conversion parameters using a generation-count
 * protocol: base_local_ts is zeroed first, a barrier is issued, the payload
 * (rate, base_remote_ts) is written, another barrier is issued, and
 * base_local_ts is written last so readers can detect torn updates.
 * NOTE(review): the #if/#else guards around the two paths are missing from
 * this view (only the trailing #endif survives).
 */
563 commpage_set_remotetime_params(double rate
, uint64_t base_local_ts
, uint64_t base_remote_ts
)
567 struct bt_params
*paramsp
= (struct bt_params
*)(_COMM_PAGE_REMOTETIME_PARAMS
+ _COMM_PAGE_RW_OFFSET
);
/* Invalidate the record while it is being updated. */
568 paramsp
->base_local_ts
= 0;
569 __asm__
volatile ("dmb ish" ::: "memory");
570 paramsp
->rate
= rate
;
571 paramsp
->base_remote_ts
= base_remote_ts
;
572 __asm__
volatile ("dmb ish" ::: "memory");
573 paramsp
->base_local_ts
= base_local_ts
; //This will act as a generation count
/* Alternate (non-publishing) path: silence the unused-parameter warning. */
577 (void)base_remote_ts
;
578 #endif /* __arm64__ */
584 * After this counter has incremented, all running CPUs are guaranteed to
585 * have quiesced, i.e. executed serially dependent memory barriers.
586 * This is only tracked for CPUs running in userspace, therefore only useful
587 * outside the kernel.
589 * Note that you can't know which side of those barriers your read was from,
590 * so you have to observe 2 increments in order to ensure that you saw a
591 * serially dependent barrier chain across all running CPUs.
/*
 * Increments the quiescent-counter slot in the commpage. Two increment
 * strategies are visible below (relaxed load+store vs. atomic fetch-add);
 * NOTE(review): the #if/#else selecting between them, and the declaration
 * of old_gen, are missing from this view of the file.
 */
594 commpage_increment_cpu_quiescent_counter(void)
602 _Atomic
uint64_t *sched_gen
= (_Atomic
uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER
+
603 _COMM_PAGE_RW_OFFSET
);
605 * On 32bit architectures, double-wide atomic load or stores are a CAS,
606 * so the atomic increment is the most efficient way to increment the
609 * On 64bit architectures however, because the update is synchronized by
610 * the cpu mask, relaxed loads and stores is more efficient.
613 old_gen
= os_atomic_load(sched_gen
, relaxed
);
614 os_atomic_store(sched_gen
, old_gen
+ 1, relaxed
);
616 old_gen
= atomic_fetch_add_explicit(sched_gen
, 1, memory_order_relaxed
);
622 * update the commpage with if dtrace user land probes are enabled
/* Publishes a 0/1 flag for DTrace DOF enablement to the commpage. */
625 commpage_update_dof(boolean_t enabled
)
628 *((uint8_t*)(_COMM_PAGE_DTRACE_DOF_ENABLED
+ _COMM_PAGE_RW_OFFSET
)) = (enabled
? 1 : 0);
635 * update the dyld global config flags
/* Publishes the dyld global configuration flags to the commpage. */
638 commpage_update_dyld_flags(uint64_t value
)
640 *((uint64_t*)(_COMM_PAGE_DYLD_FLAGS
+ _COMM_PAGE_RW_OFFSET
)) = value
;