/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *	File:		arm/commpage/commpage.c
 *	Purpose:	Set up and export a RO/RW page
 */
#include <libkern/section_keywords.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <ipc/ipc_port.h>
#include <arm/cpuid.h>		/* for cpuid_info() & cache_info() */
#include <arm/rtclock.h>
#include <libkern/OSAtomic.h>
#include <stdatomic.h>
#include <kern/remote_time.h>
#include <machine/machine_remote_time.h>

#include <sys/kdebug.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

static void commpage_init_cpu_capabilities( void );
static int commpage_cpus( void );

SECURITY_READ_ONLY_LATE(vm_address_t) commPagePtr = 0;
SECURITY_READ_ONLY_LATE(vm_address_t) sharedpage_rw_addr = 0;
SECURITY_READ_ONLY_LATE(uint32_t) _cpu_capabilities = 0;

/* For sysctl access from BSD side */
extern int gARMv81Atomics;
extern int gARMv8Crc32;

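/*
 * Called once during kernel startup to create the shared page mapping and
 * fill in the static commpage fields: version, cpu capabilities, cache line
 * size, cpu counts, memory size, page shifts, timebase offset and boot time.
 */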
void
commpage_populate(
	void)
{
	uint16_t c2;
	int cpufamily;

	sharedpage_rw_addr = pmap_create_sharedpage();
	commPagePtr = (vm_address_t)_COMM_PAGE_BASE_ADDRESS;

	*((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

	commpage_init_cpu_capabilities();
	commpage_set_timestamp(0, 0, 0, 0, 0);

	if (_cpu_capabilities & kCache32) {
		c2 = 32;
	} else if (_cpu_capabilities & kCache64) {
		c2 = 64;
	} else if (_cpu_capabilities & kCache128) {
		c2 = 128;
	} else {
		c2 = 0;
	}

	*((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2;
	*((uint32_t*)(_COMM_PAGE_SPIN_COUNT + _COMM_PAGE_RW_OFFSET)) = 1;

	commpage_update_active_cpus();
	cpufamily = cpuid_get_cpufamily();

	/* machine_info valid after ml_get_max_cpus() */
	*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
	*((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
	*((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
	*((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_allowed();
	*((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = user_cont_hwclock_allowed();
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
	/* enforce 16KB alignment for watch targets with new ABI */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#else /* __arm64__ */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
#endif /* __arm64__ */

	commpage_update_timebase();
	commpage_update_mach_continuous_time(0);

	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

	/*
	 * Set the commpage approximate time to zero for initialization.
	 * The scheduler will populate the correct value before running a user thread.
	 */
	*((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1;
#else
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0;
#endif

	commpage_update_kdebug_state();

#if CONFIG_ATM
	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif

	*((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS;
}

struct mu {
	uint64_t m;	// magic number
	int32_t a;	// add indicator
	int32_t s;	// shift amount
};

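/*
 * Update the gettimeofday data in the commpage.  TimeStamp_tick doubles as a
 * generation count: it is cleared before the other fields are rewritten and
 * set to the new timebase value (with barriers in between) once they are
 * consistent, so a reader that sees a zero or changed tick value can retry.
 */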
void
commpage_set_timestamp(
	uint64_t tbr,
	uint64_t secs,
	uint64_t frac,
	uint64_t scale,
	uint64_t tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	if (commPagePtr == 0) {
		return;
	}

	commpage_timeofday_datap = (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET);

	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

#if (__ARM_ARCH__ >= 7)
	__asm__ volatile ("dmb ish");
#endif
	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

#if (__ARM_ARCH__ >= 7)
	__asm__ volatile ("dmb ish");
#endif
	commpage_timeofday_datap->TimeStamp_tick = tbr;
}

/*
 * Update _COMM_PAGE_MEMORY_PRESSURE.  Called periodically from vm's compute_memory_pressure()
 */

void
commpage_set_memory_pressure(
	unsigned int pressure )
{
	if (commPagePtr == 0) {
		return;
	}
	*((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure;
}

/*
 * Update _COMM_PAGE_SPIN_COUNT.  We might want to reduce this when running on battery, etc.
 */

void
commpage_set_spin_count(
	unsigned int count )
{
	if (count == 0) {	/* we test for 0 after decrement, not before */
		count = 1;
	}

	if (commPagePtr == 0) {
		return;
	}
	*((uint32_t *)(_COMM_PAGE_SPIN_COUNT + _COMM_PAGE_RW_OFFSET)) = count;
}

/*
 * Determine number of CPUs on this system.
 */
static int
commpage_cpus( void )
{
	int cpus;

	cpus = ml_get_max_cpus();	// NB: this call can block

	if (cpus == 0) {
		panic("commpage cpus==0");
	}
	if (cpus > 0xFF) {
		cpus = 0xFF;
	}

	return cpus;
}

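/*
 * Return the kernel's writable (privileged) mapping of the shared commpage
 * data established by pmap_create_sharedpage(); writes made through this
 * address are what userspace sees via its read-only commpage mapping.
 */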
vm_address_t
_get_commpage_priv_address(void)
{
	return sharedpage_rw_addr;
}

/*
 * Initialize _cpu_capabilities vector
 */
static void
commpage_init_cpu_capabilities( void )
{
	uint32_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	switch (cpu_info.cache_line_size) {
	case 128:
		bits |= kCache128;
		break;
	case 64:
		bits |= kCache64;
		break;
	case 32:
		bits |= kCache32;
		break;
	default:
		break;
	}
	cpus = commpage_cpus();

	if (cpus == 1) {
		bits |= kUP;
	}

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;	// TPIDRURO for TLS

#if __ARM_VFP__
	bits |= kHasVfp;
	arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
	if (mvfp_info->neon) {
		bits |= kHasNeon;
	}
	if (mvfp_info->neon_hpfp) {
		bits |= kHasNeonHPFP;
	}
	if (mvfp_info->neon_fp16) {
		bits |= kHasNeonFP16;
	}
#endif
#if defined(__arm64__)
	bits |= kHasFMA;
#endif
#if __ARM_ENABLE_WFE_
#ifdef __arm64__
	if (arm64_wfe_allowed()) {
		bits |= kHasEvent;
	}
#else
	bits |= kHasEvent;
#endif
#endif
#if __ARM_V8_CRYPTO_EXTENSIONS__
	bits |= kHasARMv8Crypto;
#endif
#ifdef __arm64__
	uint64_t isar0 = __builtin_arm_rsr64("ID_AA64ISAR0_EL1");
	if ((isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) == ID_AA64ISAR0_EL1_ATOMIC_8_1) {
		bits |= kHasARMv81Atomics;
		gARMv81Atomics = 1;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_CRC32_MASK) == ID_AA64ISAR0_EL1_CRC32_EN) {
		bits |= kHasARMv8Crc32;
		gARMv8Crc32 = 1;
	}
#endif
	_cpu_capabilities = bits;

	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;
}

/*
 * Updated every time a logical CPU goes offline/online
 */
void
commpage_update_active_cpus(void)
{
	if (!commPagePtr) {
		return;
	}
	*((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = processor_avail_count;
}

/*
 * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
 */
void
commpage_update_timebase(void)
{
	if (commPagePtr) {
		*((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
	}
}

/*
 * Update the commpage with current kdebug state. This currently has bits for
 * global trace state, and typefilter enablement. It is likely additional state
 * will be tracked in the future.
 *
 * INVARIANT: This value will always be 0 if global tracing is disabled. This
 * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }"
 */
void
commpage_update_kdebug_state(void)
{
	if (commPagePtr) {
		*((volatile uint32_t*)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = kdebug_commpage_state();
	}
}

/* Ditto for atm_diagnostic_config */
void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
{
	if (commPagePtr) {
		*((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config;
	}
}

/*
 * Update the commpage data with the state of multiuser mode for
 * this device.  This allows various services in userspace to avoid
 * IPC in the (more common) non-multiuser environment.
 */
void
commpage_update_multiuser_config(uint32_t multiuser_config)
{
	if (commPagePtr) {
		*((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config;
	}
}

/*
 * Update the commpage's approximate time with the last known value of
 * mach_absolute_time().
 */

void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	uintptr_t approx_time_base = (uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);
	uint64_t saved_data;

	if (commPagePtr) {
		saved_data = atomic_load_explicit((_Atomic uint64_t *)approx_time_base,
		    memory_order_relaxed);
		if (saved_data < abstime) {
			/* ignoring the success/fail return value assuming that
			 * if the value has been updated since we last read it,
			 * "someone" has a newer timestamp than us and ours is
			 * now invalid. */
			atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)approx_time_base,
			    &saved_data, abstime, memory_order_relaxed, memory_order_relaxed);
		}
	}
#else
#pragma unused (abstime)
#endif
}

/*
 * Update the commpage's total system sleep time for
 * userspace calls to mach_continuous_time().
 */
void
commpage_update_mach_continuous_time(uint64_t sleeptime)
{
	if (commPagePtr) {
#ifdef __arm64__
		*((uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = sleeptime;
#else
		uint64_t *c_time_base = (uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
		uint64_t old;
		do {
			old = *c_time_base;
		} while (!OSCompareAndSwap64(old, sleeptime, c_time_base));
#endif /* __arm64__ */
	}
}

/*
 * update the commpage's value for the boot time
 */
void
commpage_update_boottime(uint64_t value)
{
	if (commPagePtr) {
#ifdef __arm64__
		*((uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET)) = value;
#else
		uint64_t *cp = (uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
		uint64_t old_value;
		do {
			old_value = *cp;
		} while (!OSCompareAndSwap64(old_value, value, cp));
#endif /* __arm64__ */
	}
}

/*
 * set the commpage's remote time params for
 * userspace call to mach_bridge_remote_time()
 */
void
commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
{
	if (commPagePtr) {
#ifdef __arm64__
		struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
		paramsp->base_local_ts = 0;
		__asm__ volatile ("dmb ish" ::: "memory");
		paramsp->rate = rate;
		paramsp->base_remote_ts = base_remote_ts;
		__asm__ volatile ("dmb ish" ::: "memory");
		paramsp->base_local_ts = base_local_ts;	// This will act as a generation count
#else
		(void)rate;
		(void)base_local_ts;
		(void)base_remote_ts;
#endif /* __arm64__ */
	}
}

/*
 * After this counter has incremented, all running CPUs are guaranteed to
 * have quiesced, i.e. executed serially dependent memory barriers.
 * This is only tracked for CPUs running in userspace, therefore only useful
 * outside the kernel.
 *
 * Note that you can't know which side of those barriers your read was from,
 * so you have to observe 2 increments in order to ensure that you saw a
 * serially dependent barrier chain across all running CPUs.
 */
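/*
 * Illustrative only (not part of this file's interface): a userspace consumer
 * could, for example, snapshot the counter and wait until it has advanced by 2:
 *
 *	uint64_t start = *(volatile uint64_t *)_COMM_PAGE_CPU_QUIESCENT_COUNTER;
 *	while (*(volatile uint64_t *)_COMM_PAGE_CPU_QUIESCENT_COUNTER - start < 2) {
 *		;	// spin or block; two increments span a full barrier chain
 *	}
 */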
uint64_t
commpage_increment_cpu_quiescent_counter(void)
{
	if (!commPagePtr) {
		return 0;
	}

	uint64_t old_gen;

	_Atomic uint64_t *sched_gen = (_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER +
	    _COMM_PAGE_RW_OFFSET);
	/*
	 * On 32-bit architectures, double-wide atomic loads or stores are a CAS,
	 * so the atomic increment is the most efficient way to increment the
	 * counter.
	 *
	 * On 64-bit architectures however, because the update is synchronized by
	 * the cpu mask, relaxed loads and stores are more efficient.
	 */
#if __LP64__
	old_gen = atomic_load_explicit(sched_gen, memory_order_relaxed);
	atomic_store_explicit(sched_gen, old_gen + 1, memory_order_relaxed);
#else
	old_gen = atomic_fetch_add_explicit(sched_gen, 1, memory_order_relaxed);
#endif
	return old_gen;
}