]> git.saurik.com Git - apple/xnu.git/blame - osfmk/arm/commpage/commpage.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / arm / commpage / commpage.c
CommitLineData
5ba3f43e
A
1/*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 *
5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 6 *
5ba3f43e
A
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the License
11 * may not be used to create, or enable the creation or redistribution of,
12 * unlawful or unlicensed copies of an Apple operating system, or to
13 * circumvent, violate, or enable the circumvention or violation of, any
14 * terms of an Apple operating system software license agreement.
0a7de745 15 *
5ba3f43e
A
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 18 *
5ba3f43e
A
19 * The Original Code and all software distributed under the License are
20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24 * Please see the License for the specific language governing rights and
25 * limitations under the License.
0a7de745 26 *
5ba3f43e
A
27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 */
29/*
30 * @OSF_COPYRIGHT@
31 */
32/*
33 * @APPLE_FREE_COPYRIGHT@
34 */
35/*
36 * File: arm/commpage/commpage.c
37 * Purpose: Set up and export a RO/RW page
38 */
d9a64523 39#include <libkern/section_keywords.h>
5ba3f43e
A
40#include <mach/mach_types.h>
41#include <mach/machine.h>
42#include <mach/vm_map.h>
43#include <machine/cpu_capabilities.h>
44#include <machine/commpage.h>
cb323159 45#include <machine/config.h>
5ba3f43e
A
46#include <machine/pmap.h>
47#include <vm/vm_kern.h>
48#include <vm/vm_map.h>
49#include <vm/vm_protos.h>
50#include <ipc/ipc_port.h>
0a7de745 51#include <arm/cpuid.h> /* for cpuid_info() & cache_info() */
5ba3f43e
A
52#include <arm/rtclock.h>
53#include <libkern/OSAtomic.h>
54#include <stdatomic.h>
0a7de745
A
55#include <kern/remote_time.h>
56#include <machine/machine_remote_time.h>
f427ee49 57#include <machine/machine_routines.h>
5ba3f43e
A
58
59#include <sys/kdebug.h>
60
61#if CONFIG_ATM
62#include <atm/atm_internal.h>
63#endif
64
5ba3f43e
A
/* Forward declarations for the static helpers defined below. */
static int commpage_cpus( void );

static void commpage_init_cpu_capabilities( void );

/*
 * Kernel-side bookkeeping for the commpage.  All of these are written once
 * during early boot (SECURITY_READ_ONLY_LATE pins them read-only afterwards).
 */
SECURITY_READ_ONLY_LATE(vm_address_t) commPagePtr = 0;          /* fixed kernel VA of the commpage; 0 until populated */
SECURITY_READ_ONLY_LATE(vm_address_t) sharedpage_rw_addr = 0;   /* kernel RW alias of the commpage data page */
SECURITY_READ_ONLY_LATE(uint64_t) _cpu_capabilities = 0;        /* cached capability bits, also published to the commpage */
SECURITY_READ_ONLY_LATE(vm_address_t) sharedpage_rw_text_addr = 0; /* kernel RW alias of the commpage text page */

/* User-visible addresses of the commpage text page (set in commpage_populate). */
extern user64_addr_t commpage_text64_location;
extern user32_addr_t commpage_text32_location;

/* For sysctl access from BSD side */
extern int gARMv81Atomics;
extern int gARMv8Crc32;
extern int gARMv82FHM;
extern int gARMv82SHA512;
extern int gARMv82SHA3;
5ba3f43e
A
84
/*
 * Create the commpage data/text pages and fill in every boot-time field:
 * signature, version, cache geometry, CPU counts/family, timebase, boottime,
 * page shifts, approximate-time support, kdebug and (optionally) ATM state.
 * Called once during startup, before user threads run.
 */
void
commpage_populate(void)
{
	uint16_t c2;
	int cpufamily;

	// Create the data and the text commpage
	vm_map_address_t kernel_data_addr, kernel_text_addr, user_text_addr;
	pmap_create_sharedpages(&kernel_data_addr, &kernel_text_addr, &user_text_addr);

	sharedpage_rw_addr = kernel_data_addr;
	sharedpage_rw_text_addr = kernel_text_addr;
	/* From here on, _COMM_PAGE_* + _COMM_PAGE_RW_OFFSET addressing is valid. */
	commPagePtr = (vm_address_t) _COMM_PAGE_BASE_ADDRESS;

#if __arm64__
	commpage_text64_location = user_text_addr;
	bcopy(_COMM_PAGE64_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
	    MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE64_SIGNATURE_STRING)));
#else
	commpage_text32_location = user_text_addr;
	bcopy(_COMM_PAGE32_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
	    MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE32_SIGNATURE_STRING)));
#endif

	*((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

	commpage_init_cpu_capabilities();
	/* Zeroed timestamp marks timeofday data invalid until the clock publishes one. */
	commpage_set_timestamp(0, 0, 0, 0, 0);

	/* Translate the capability bit into a byte count for the linesize field. */
	if (_cpu_capabilities & kCache32) {
		c2 = 32;
	} else if (_cpu_capabilities & kCache64) {
		c2 = 64;
	} else if (_cpu_capabilities & kCache128) {
		c2 = 128;
	} else {
		c2 = 0;
	}

	*((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2;

	commpage_update_active_cpus();
	cpufamily = cpuid_get_cpufamily();

	/* CPU counts are truncated to one byte to match the commpage layout. */
	*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
	*((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
	*((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
	*((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_type();
	*((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = (uint8_t)user_cont_hwclock_allowed();
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#elif (__ARM_ARCH_7K__ >= 2)
	/* enforce 16KB alignment for watch targets with new ABI */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#else /* __arm64__ */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
#endif /* __arm64__ */

	commpage_update_timebase();
	commpage_update_mach_continuous_time(0);

	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

	/*
	 * set commpage approximate time to zero for initialization.
	 * scheduler shall populate correct value before running user thread
	 */
	*((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1;
#else
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0;
#endif

	commpage_update_kdebug_state();

#if CONFIG_ATM
	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif

	/* Sentinel tells userspace the remote-time params have not been set yet. */
	*((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS;
}
178
f427ee49
A
/* Segment/section that the linker places the commpage (PFZ) code into. */
#define COMMPAGE_TEXT_SEGMENT "__TEXT_EXEC"
#define COMMPAGE_TEXT_SECTION "__commpage_text"

/* Get a pointer to the start of the ARM PFZ code section. This macro tell the
 * linker that the storage for the variable here is at the start of the section */
extern char commpage_text_start[]
__SECTION_START_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);

/* Get a pointer to the end of the ARM PFZ code section. This macro tell the
 * linker that the storage for the variable here is at the end of the section */
extern char commpage_text_end[]
__SECTION_END_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);

/* This is defined in the commpage text section as a symbol at the start of the preemptible
 * functions */
extern char commpage_text_preemptible_functions;

#if CONFIG_ARM_PFZ
/* Byte length of the non-preemptible (PFZ) half; set by commpage_text_populate(). */
static size_t size_of_pfz = 0;
#endif

/* This is the opcode for brk #666 */
#define BRK_666_OPCODE 0xD4205340
202
/*
 * Copy the kernel's commpage text section into the user-visible commpage
 * text page and record the size of the PFZ (preemption-free zone) half.
 * The unused remainder of the page is filled with `brk #666` instructions
 * so that a stray jump into it traps instead of executing garbage.
 */
void
commpage_text_populate(void)
{
#if CONFIG_ARM_PFZ
	size_t size_of_commpage_text = commpage_text_end - commpage_text_start;
	if (size_of_commpage_text == 0) {
		panic("ARM comm page text section %s,%s missing", COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
	}
	assert(size_of_commpage_text <= PAGE_SIZE);
	assert(size_of_commpage_text > 0);

	/* Get the size of the PFZ half of the comm page text section. */
	size_of_pfz = &commpage_text_preemptible_functions - commpage_text_start;

	// Copy the code segment of comm page text section into the PFZ
	memcpy((void *) _COMM_PAGE64_TEXT_START_ADDRESS, (void *) commpage_text_start, size_of_commpage_text);

	/*
	 * Populate the rest of the page with brk #666 so that undefined code
	 * doesn't get run.  memset() cannot be used here: it stores only the
	 * low byte of its fill value (0x40 of 0xD4205340), which is not a brk
	 * instruction.  Store the full 32-bit opcode one instruction at a time.
	 */
	assert((size_of_commpage_text % sizeof(uint32_t)) == 0);
	uint32_t *pad = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + size_of_commpage_text);
	uint32_t *pad_end = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + PAGE_SIZE);
	while (pad < pad_end) {
		*pad++ = BRK_666_OPCODE;
	}
#endif
}
226
227uint32_t
228commpage_is_in_pfz64(addr64_t addr64)
229{
230#if CONFIG_ARM_PFZ
231 if ((addr64 >= commpage_text64_location) &&
232 (addr64 < (commpage_text64_location + size_of_pfz))) {
233 return 1;
234 } else {
235 return 0;
236 }
237#else
238#pragma unused (addr64)
239 return 0;
240#endif
241}
242
5ba3f43e
A
243
/*
 * Publish a new timeofday snapshot to the commpage using a seqlock-style
 * protocol: TimeStamp_tick is zeroed (invalidating the record), the payload
 * fields are stored, and TimeStamp_tick is written last to re-validate.
 * Userspace readers retry if they observe a zero or changing tick.
 *
 *	tbr          - timebase value the snapshot corresponds to (0 invalidates)
 *	secs/frac    - seconds and fractional part of the wall-clock time
 *	scale        - ticks-to-time conversion factor
 *	tick_per_sec - timebase ticks per second
 */
void
commpage_set_timestamp(
	uint64_t tbr,
	uint64_t secs,
	uint64_t frac,
	uint64_t scale,
	uint64_t tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	/* Nothing to publish before the commpage exists. */
	if (commPagePtr == 0) {
		return;
	}

	commpage_timeofday_datap = (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET);

	/* Invalidate the record before touching the payload fields. */
	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

#if (__ARM_ARCH__ >= 7)
	/* Order the invalidation before the payload stores. */
	__asm__ volatile ("dmb ish");
#endif
	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

#if (__ARM_ARCH__ >= 7)
	/* Order the payload stores before re-validating the record. */
	__asm__ volatile ("dmb ish");
#endif
	commpage_timeofday_datap->TimeStamp_tick = tbr;
}
276
277/*
278 * Update _COMM_PAGE_MEMORY_PRESSURE. Called periodically from vm's compute_memory_pressure()
279 */
280
281void
282commpage_set_memory_pressure(
0a7de745 283 unsigned int pressure )
5ba3f43e 284{
0a7de745 285 if (commPagePtr == 0) {
5ba3f43e 286 return;
0a7de745
A
287 }
288 *((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure;
5ba3f43e
A
289}
290
5ba3f43e
A
291/*
292 * Determine number of CPUs on this system.
293 */
294static int
295commpage_cpus( void )
296{
297 int cpus;
298
f427ee49 299 cpus = machine_info.max_cpus;
5ba3f43e 300
0a7de745 301 if (cpus == 0) {
5ba3f43e 302 panic("commpage cpus==0");
0a7de745
A
303 }
304 if (cpus > 0xFF) {
5ba3f43e 305 cpus = 0xFF;
0a7de745 306 }
5ba3f43e
A
307
308 return cpus;
309}
310
/* Return the cached CPU capability bits computed by commpage_init_cpu_capabilities(). */
uint64_t
_get_cpu_capabilities(void)
{
	return _cpu_capabilities;
}
316
/* Return the kernel RW alias of the commpage data page. */
vm_address_t
_get_commpage_priv_address(void)
{
	return sharedpage_rw_addr;
}
322
/* Return the kernel RW alias of the commpage text page. */
vm_address_t
_get_commpage_text_priv_address(void)
{
	return sharedpage_rw_text_addr;
}
328
5ba3f43e
A
/*
 * Initialize _cpu_capabilities vector: probe cache geometry, CPU count,
 * FP/SIMD features and (on arm64) ID_AA64ISAR0_EL1 ISA extensions, then
 * publish the result both to the cached global and to the 32/64-bit
 * capability words in the commpage.  Also sets the gARM* globals that
 * BSD sysctls report.
 */
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	/* Encode the cache line size as a capability bit. */
	switch (cpu_info.cache_line_size) {
	case 128:
		bits |= kCache128;
		break;
	case 64:
		bits |= kCache64;
		break;
	case 32:
		bits |= kCache32;
		break;
	default:
		break;
	}
	cpus = commpage_cpus();

	if (cpus == 1) {
		bits |= kUP;
	}

	/* CPU count is packed into the capability word at kNumCPUsShift. */
	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage; // TPIDRURO for TLS

#if __ARM_VFP__
	bits |= kHasVfp;
	arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
	if (mvfp_info->neon) {
		bits |= kHasNeon;
	}
	if (mvfp_info->neon_hpfp) {
		bits |= kHasNeonHPFP;
	}
	if (mvfp_info->neon_fp16) {
		bits |= kHasNeonFP16;
	}
#endif
#if defined(__arm64__)
	bits |= kHasFMA;
#endif
#if __ARM_ENABLE_WFE_
	bits |= kHasEvent;
#endif
#if __ARM_V8_CRYPTO_EXTENSIONS__
	bits |= kHasARMv8Crypto;
#endif
#ifdef __arm64__
	/* Decode optional ISA extensions from the ID_AA64ISAR0_EL1 feature register. */
	uint64_t isar0 = __builtin_arm_rsr64("ID_AA64ISAR0_EL1");
	if ((isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) == ID_AA64ISAR0_EL1_ATOMIC_8_1) {
		bits |= kHasARMv81Atomics;
		gARMv81Atomics = 1;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_CRC32_MASK) == ID_AA64ISAR0_EL1_CRC32_EN) {
		bits |= kHasARMv8Crc32;
		gARMv8Crc32 = 1;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_FHM_MASK) >= ID_AA64ISAR0_EL1_FHM_8_2) {
		bits |= kHasARMv82FHM;
		gARMv82FHM = 1;
	}

	/* SHA2 field strictly greater than "basic SHA2" means SHA512 is present. */
	if ((isar0 & ID_AA64ISAR0_EL1_SHA2_MASK) > ID_AA64ISAR0_EL1_SHA2_EN) {
		bits |= kHasARMv82SHA512;
		gARMv82SHA512 = 1;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_SHA3_MASK) >= ID_AA64ISAR0_EL1_SHA3_EN) {
		bits |= kHasARMv82SHA3;
		gARMv82SHA3 = 1;
	}

#endif

	_cpu_capabilities = bits;

	/* Publish both the legacy 32-bit word and the full 64-bit word. */
	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = (uint32_t)_cpu_capabilities;
	*((uint64_t *)(_COMM_PAGE_CPU_CAPABILITIES64 + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;
}
421
422/*
423 * Updated every time a logical CPU goes offline/online
424 */
425void
426commpage_update_active_cpus(void)
427{
0a7de745
A
428 if (!commPagePtr) {
429 return;
430 }
f427ee49
A
431 *((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t)processor_avail_count;
432
5ba3f43e
A
433}
434
435/*
436 * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
437 */
438void
439commpage_update_timebase(void)
440{
441 if (commPagePtr) {
0a7de745 442 *((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
5ba3f43e
A
443 }
444}
445
446/*
447 * Update the commpage with current kdebug state. This currently has bits for
448 * global trace state, and typefilter enablement. It is likely additional state
449 * will be tracked in the future.
450 *
451 * INVARIANT: This value will always be 0 if global tracing is disabled. This
452 * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }"
453 */
454void
455commpage_update_kdebug_state(void)
456{
0a7de745
A
457 if (commPagePtr) {
458 *((volatile uint32_t*)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = kdebug_commpage_state();
459 }
5ba3f43e
A
460}
461
462/* Ditto for atm_diagnostic_config */
463void
464commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
465{
0a7de745
A
466 if (commPagePtr) {
467 *((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config;
468 }
5ba3f43e
A
469}
470
471/*
472 * Update the commpage data with the state of multiuser mode for
473 * this device. Allowing various services in userspace to avoid
474 * IPC in the (more common) non-multiuser environment.
475 */
476void
477commpage_update_multiuser_config(uint32_t multiuser_config)
478{
0a7de745
A
479 if (commPagePtr) {
480 *((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config;
481 }
5ba3f43e
A
482}
483
/*
 * update the commpage data for
 * last known value of mach_absolute_time()
 *
 * The commpage value only ever moves forward: a relaxed load checks the
 * current value and a single CAS attempts the advance; losing the race
 * means another CPU published a newer timestamp, so losing is harmless.
 */

void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	uintptr_t approx_time_base = (uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);
	uint64_t saved_data;

	if (commPagePtr) {
		saved_data = atomic_load_explicit((_Atomic uint64_t *)approx_time_base,
		    memory_order_relaxed);
		if (saved_data < abstime) {
			/* ignoring the success/fail return value assuming that
			 * if the value has been updated since we last read it,
			 * "someone" has a newer timestamp than us and ours is
			 * now invalid. */
			atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)approx_time_base,
			    &saved_data, abstime, memory_order_relaxed, memory_order_relaxed);
		}
	}
#else
#pragma unused (abstime)
#endif
}
512
/*
 * update the commpage data's total system sleep time for
 * userspace call to mach_continuous_time()
 */
void
commpage_update_mach_continuous_time(uint64_t sleeptime)
{
	if (commPagePtr) {
#ifdef __arm64__
		/* 64-bit stores are atomic on arm64; write directly. */
		*((uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = sleeptime;
#else
		/* On 32-bit ARM a 64-bit store is not single-copy atomic,
		 * so publish via a compare-and-swap loop. */
		uint64_t *c_time_base = (uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
		uint64_t old;
		do {
			old = *c_time_base;
		} while (!OSCompareAndSwap64(old, sleeptime, c_time_base));
#endif /* __arm64__ */
	}
}
532
f427ee49
A
533void
534commpage_update_mach_continuous_time_hw_offset(uint64_t offset)
535{
536 *((uint64_t *)(_COMM_PAGE_CONT_HW_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = offset;
537}
538
5ba3f43e
A
/*
 * update the commpage's value for the boot time
 */
void
commpage_update_boottime(uint64_t value)
{
	if (commPagePtr) {
#ifdef __arm64__
		/* 64-bit stores are atomic on arm64; write directly. */
		*((uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET)) = value;
#else
		/* On 32-bit ARM a 64-bit store is not single-copy atomic,
		 * so publish via a compare-and-swap loop. */
		uint64_t *cp = (uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
		uint64_t old_value;
		do {
			old_value = *cp;
		} while (!OSCompareAndSwap64(old_value, value, cp));
#endif /* __arm64__ */
	}
}
d26ffc64 557
0a7de745
A
/*
 * set the commpage's remote time params for
 * userspace call to mach_bridge_remote_time()
 *
 * base_local_ts doubles as a generation/validity word: it is zeroed first,
 * the other fields are stored behind a barrier, and it is written last so
 * readers never observe a half-updated parameter block.
 */
void
commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
{
	if (commPagePtr) {
#ifdef __arm64__
		struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
		/* Invalidate the block before touching the payload. */
		paramsp->base_local_ts = 0;
		__asm__ volatile ("dmb ish" ::: "memory");
		paramsp->rate = rate;
		paramsp->base_remote_ts = base_remote_ts;
		__asm__ volatile ("dmb ish" ::: "memory");
		paramsp->base_local_ts = base_local_ts; //This will act as a generation count
#else
		(void)rate;
		(void)base_local_ts;
		(void)base_remote_ts;
#endif /* __arm64__ */
	}
}
581
d9a64523 582
/*
 * After this counter has incremented, all running CPUs are guaranteed to
 * have quiesced, i.e. executed serially dependent memory barriers.
 * This is only tracked for CPUs running in userspace, therefore only useful
 * outside the kernel.
 *
 * Note that you can't know which side of those barriers your read was from,
 * so you have to observe 2 increments in order to ensure that you saw a
 * serially dependent barrier chain across all running CPUs.
 *
 * Returns the generation value prior to the increment, or 0 if the
 * commpage has not been created yet.
 */
uint64_t
commpage_increment_cpu_quiescent_counter(void)
{
	if (!commPagePtr) {
		return 0;
	}

	uint64_t old_gen;

	_Atomic uint64_t *sched_gen = (_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER +
	    _COMM_PAGE_RW_OFFSET);
	/*
	 * On 32bit architectures, double-wide atomic load or stores are a CAS,
	 * so the atomic increment is the most efficient way to increment the
	 * counter.
	 *
	 * On 64bit architectures however, because the update is synchronized by
	 * the cpu mask, relaxed loads and stores is more efficient.
	 */
#if __LP64__
	old_gen = os_atomic_load(sched_gen, relaxed);
	os_atomic_store(sched_gen, old_gen + 1, relaxed);
#else
	old_gen = atomic_fetch_add_explicit(sched_gen, 1, memory_order_relaxed);
#endif
	return old_gen;
}
cb323159
A
620
621/*
622 * update the commpage with if dtrace user land probes are enabled
623 */
624void
625commpage_update_dof(boolean_t enabled)
626{
627#if CONFIG_DTRACE
628 *((uint8_t*)(_COMM_PAGE_DTRACE_DOF_ENABLED + _COMM_PAGE_RW_OFFSET)) = (enabled ? 1 : 0);
629#else
630 (void)enabled;
631#endif
632}
633
634/*
635 * update the dyld global config flags
636 */
637void
638commpage_update_dyld_flags(uint64_t value)
639{
f427ee49
A
640 *((uint64_t*)(_COMM_PAGE_DYLD_FLAGS + _COMM_PAGE_RW_OFFSET)) = value;
641
cb323159 642}