/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *	File:		arm/commpage/commpage.c
 *	Purpose:	Set up and export a RO/RW page
 */
#include <libkern/section_keywords.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/config.h>
#include <machine/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <ipc/ipc_port.h>
#include <arm/cpuid.h>          /* for cpuid_info() & cache_info() */
#include <arm/rtclock.h>
#include <libkern/OSAtomic.h>
#include <stdatomic.h>
#include <kern/remote_time.h>
#include <machine/machine_remote_time.h>

#include <sys/kdebug.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif
static void commpage_init_cpu_capabilities( void );
static int commpage_cpus( void );

SECURITY_READ_ONLY_LATE(vm_address_t) commPagePtr = 0;
SECURITY_READ_ONLY_LATE(vm_address_t) sharedpage_rw_addr = 0;
SECURITY_READ_ONLY_LATE(uint32_t) _cpu_capabilities = 0;

/* For sysctl access from BSD side */
extern int gARMv81Atomics;
extern int gARMv8Crc32;
extern int gARMv82FHM;
void
commpage_populate(
	void)
{
	uint16_t c2;
	int cpufamily;

	sharedpage_rw_addr = pmap_create_sharedpage();
	commPagePtr = (vm_address_t)_COMM_PAGE_BASE_ADDRESS;

#if __arm64__
	bcopy(_COMM_PAGE64_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
	    MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE64_SIGNATURE_STRING)));
#else
	bcopy(_COMM_PAGE32_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
	    MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE32_SIGNATURE_STRING)));
#endif

	*((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

	commpage_init_cpu_capabilities();
	commpage_set_timestamp(0, 0, 0, 0, 0);

	if (_cpu_capabilities & kCache32) {
		c2 = 32;
	} else if (_cpu_capabilities & kCache64) {
		c2 = 64;
	} else if (_cpu_capabilities & kCache128) {
		c2 = 128;
	} else {
		c2 = 0;
	}

	*((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2;
	*((uint32_t*)(_COMM_PAGE_SPIN_COUNT + _COMM_PAGE_RW_OFFSET)) = 1;

	commpage_update_active_cpus();
	cpufamily = cpuid_get_cpufamily();

	/* machine_info valid after ml_get_max_cpus() */
	*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
	*((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
	*((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
	*((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_type();
	*((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = user_cont_hwclock_allowed();
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#elif (__ARM_ARCH_7K__ >= 2)
	/* enforce 16KB alignment for watch targets with new ABI */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#else /* __arm64__ */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
#endif /* __arm64__ */

	commpage_update_timebase();
	commpage_update_mach_continuous_time(0);

	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

	/*
	 * Set the commpage's approximate time to zero for initialization;
	 * the scheduler will populate the correct value before running a
	 * user thread.
	 */
	*((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1;
#else
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0;
#endif

	commpage_update_kdebug_state();

#if CONFIG_ATM
	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif

	*((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS;
}
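
/*
 * Illustrative userspace-side sketch (not part of the original file): how a
 * process might read the static fields populated above. It assumes the
 * _COMM_PAGE_* macros from <machine/cpu_capabilities.h> resolve to the fixed
 * user mapping of the commpage when included from user code; treat this as a
 * sketch of the mechanism, not a supported API.
 */
#if 0 /* userspace illustration only */
#include <machine/cpu_capabilities.h>
#include <stdint.h>
#include <stdio.h>

static void
print_commpage_basics(void)
{
	/* Plain loads suffice: these fields are written once at boot. */
	uint8_t ncpus = *(volatile uint8_t *)_COMM_PAGE_LOGICAL_CPUS;
	uint64_t memsize = *(volatile uint64_t *)_COMM_PAGE_MEMORY_SIZE;

	printf("logical cpus: %u, memory: %llu bytes\n", (unsigned)ncpus,
	    (unsigned long long)memsize);
}
#endif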

struct mu {
	uint64_t m;     // magic number
	int32_t a;      // add indicator
	int32_t s;      // shift amount
};

void
commpage_set_timestamp(
	uint64_t tbr,
	uint64_t secs,
	uint64_t frac,
	uint64_t scale,
	uint64_t tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	if (commPagePtr == 0) {
		return;
	}

	commpage_timeofday_datap = (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET);

	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

#if (__ARM_ARCH__ >= 7)
	__asm__ volatile ("dmb ish");
#endif
	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

#if (__ARM_ARCH__ >= 7)
	__asm__ volatile ("dmb ish");
#endif
	commpage_timeofday_datap->TimeStamp_tick = tbr;
}
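
/*
 * Illustrative reader-side sketch (not part of the original file): the
 * zero/write/restore of TimeStamp_tick above is a generation-count protocol,
 * so a reader must observe the same nonzero tick on both sides of its field
 * loads to know it got a consistent snapshot. A minimal sketch, assuming a
 * user mapping `datap` of the structure and arm64 barriers:
 */
#if 0 /* userspace illustration only */
static uint64_t
read_timeofday_snapshot(volatile new_commpage_timeofday_data_t *datap,
    uint64_t *secs, uint64_t *frac)
{
	uint64_t tick;

	do {
		tick = datap->TimeStamp_tick; /* generation: 0 => update in flight */
		__asm__ volatile ("dmb ishld" ::: "memory");
		*secs = datap->TimeStamp_sec;
		*frac = datap->TimeStamp_frac;
		__asm__ volatile ("dmb ishld" ::: "memory");
	} while (tick == 0 || tick != datap->TimeStamp_tick);

	return tick;
}
#endif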

/*
 * Update _COMM_PAGE_MEMORY_PRESSURE.  Called periodically from vm's compute_memory_pressure()
 */

void
commpage_set_memory_pressure(
	unsigned int pressure )
{
	if (commPagePtr == 0) {
		return;
	}
	*((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure;
}

/*
 * Update _COMM_PAGE_SPIN_COUNT.  We might want to reduce it when running on battery, etc.
 */

void
commpage_set_spin_count(
	unsigned int count )
{
	if (count == 0) {       /* we test for 0 after decrement, not before */
		count = 1;
	}

	if (commPagePtr == 0) {
		return;
	}
	*((uint32_t *)(_COMM_PAGE_SPIN_COUNT + _COMM_PAGE_RW_OFFSET)) = count;
}

/*
 * Determine number of CPUs on this system.
 */
static int
commpage_cpus( void )
{
	int cpus;

	cpus = ml_get_max_cpus();       // NB: this call can block

	if (cpus == 0) {
		panic("commpage cpus==0");
	}
	if (cpus > 0xFF) {
		cpus = 0xFF;
	}

	return cpus;
}

int
_get_cpu_capabilities(void)
{
	return _cpu_capabilities;
}

vm_address_t
_get_commpage_priv_address(void)
{
	return sharedpage_rw_addr;
}

/*
 * Initialize _cpu_capabilities vector
 */
static void
commpage_init_cpu_capabilities( void )
{
	uint32_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	switch (cpu_info.cache_line_size) {
	case 128:
		bits |= kCache128;
		break;
	case 64:
		bits |= kCache64;
		break;
	case 32:
		bits |= kCache32;
		break;
	default:
		break;
	}
	cpus = commpage_cpus();

	if (cpus == 1) {
		bits |= kUP;
	}

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;        // TPIDRURO for TLS

#if __ARM_VFP__
	bits |= kHasVfp;
	arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
	if (mvfp_info->neon) {
		bits |= kHasNeon;
	}
	if (mvfp_info->neon_hpfp) {
		bits |= kHasNeonHPFP;
	}
	if (mvfp_info->neon_fp16) {
		bits |= kHasNeonFP16;
	}
#endif
#if defined(__arm64__)
	bits |= kHasFMA;
#endif
#if __ARM_ENABLE_WFE_
#ifdef __arm64__
	if (arm64_wfe_allowed()) {
		bits |= kHasEvent;
	}
#else
	bits |= kHasEvent;
#endif
#endif
#if __ARM_V8_CRYPTO_EXTENSIONS__
	bits |= kHasARMv8Crypto;
#endif
#ifdef __arm64__
	uint64_t isar0 = __builtin_arm_rsr64("ID_AA64ISAR0_EL1");
	if ((isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) == ID_AA64ISAR0_EL1_ATOMIC_8_1) {
		bits |= kHasARMv81Atomics;
		gARMv81Atomics = 1;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_CRC32_MASK) == ID_AA64ISAR0_EL1_CRC32_EN) {
		bits |= kHasARMv8Crc32;
		gARMv8Crc32 = 1;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_FHM_MASK) >= ID_AA64ISAR0_EL1_FHM_8_2) {
		bits |= kHasARMv82FHM;
		gARMv82FHM = 1;
	}
#endif

	_cpu_capabilities = bits;

	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;
}
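
/*
 * Illustrative userspace-side sketch (not part of the original file):
 * libraries select optimized code paths by testing the bits published above.
 * A minimal sketch, assuming _COMM_PAGE_CPU_CAPABILITIES and the k* bit
 * constants from <machine/cpu_capabilities.h> are visible to user code:
 */
#if 0 /* userspace illustration only */
static int
have_lse_atomics(void)
{
	uint32_t caps = *(volatile uint32_t *)_COMM_PAGE_CPU_CAPABILITIES;

	/* Prefer ARMv8.1 LSE (LDADD/CAS/SWP) over LL/SC loops when present. */
	return (caps & kHasARMv81Atomics) != 0;
}
#endif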

/*
 * Updated every time a logical CPU goes offline/online
 */
void
commpage_update_active_cpus(void)
{
	if (!commPagePtr) {
		return;
	}
	*((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = processor_avail_count;
}

/*
 * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
 */
void
commpage_update_timebase(void)
{
	if (commPagePtr) {
		*((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
	}
}

/*
 * Update the commpage with current kdebug state. This currently has bits for
 * global trace state, and typefilter enablement. It is likely additional state
 * will be tracked in the future.
 *
 * INVARIANT: This value will always be 0 if global tracing is disabled. This
 * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }"
 */
void
commpage_update_kdebug_state(void)
{
	if (commPagePtr) {
		*((volatile uint32_t*)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = kdebug_commpage_state();
	}
}

/* Ditto for atm_diagnostic_config */
void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
{
	if (commPagePtr) {
		*((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config;
	}
}

/*
 * Update the commpage data with the state of multiuser mode for
 * this device, allowing various services in userspace to avoid
 * IPC in the (more common) non-multiuser environment.
 */
void
commpage_update_multiuser_config(uint32_t multiuser_config)
{
	if (commPagePtr) {
		*((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config;
	}
}

/*
 * Update the commpage data with the last known value of
 * mach_absolute_time().
 */

void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	uintptr_t approx_time_base = (uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);
	uint64_t saved_data;

	if (commPagePtr) {
		saved_data = atomic_load_explicit((_Atomic uint64_t *)approx_time_base,
		    memory_order_relaxed);
		if (saved_data < abstime) {
			/* Ignore the success/fail return value: if the value
			 * has been updated since we last read it, "someone"
			 * has a newer timestamp than us and ours is now
			 * stale. */
			atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)approx_time_base,
			    &saved_data, abstime, memory_order_relaxed, memory_order_relaxed);
		}
	}
#else
#pragma unused (abstime)
#endif
}

/*
 * Update the commpage data's total system sleep time for
 * userspace calls to mach_continuous_time().
 */
void
commpage_update_mach_continuous_time(uint64_t sleeptime)
{
	if (commPagePtr) {
#ifdef __arm64__
		*((uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = sleeptime;
#else
		/* Use a CAS loop so the 64-bit value is published atomically
		 * on 32-bit processors. */
		uint64_t *c_time_base = (uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
		uint64_t old;
		do {
			old = *c_time_base;
		} while (!OSCompareAndSwap64(old, sleeptime, c_time_base));
#endif /* __arm64__ */
	}
}

/*
 * Update the commpage's value for the boot time.
 */
void
commpage_update_boottime(uint64_t value)
{
	if (commPagePtr) {
#ifdef __arm64__
		*((uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET)) = value;
#else
		/* Same CAS loop as above: atomic 64-bit store on 32-bit. */
		uint64_t *cp = (uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
		uint64_t old_value;
		do {
			old_value = *cp;
		} while (!OSCompareAndSwap64(old_value, value, cp));
#endif /* __arm64__ */
	}
}

/*
 * Set the commpage's remote time params for
 * userspace calls to mach_bridge_remote_time().
 */
void
commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
{
	if (commPagePtr) {
#ifdef __arm64__
		struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
		paramsp->base_local_ts = 0;
		__asm__ volatile ("dmb ish" ::: "memory");
		paramsp->rate = rate;
		paramsp->base_remote_ts = base_remote_ts;
		__asm__ volatile ("dmb ish" ::: "memory");
		paramsp->base_local_ts = base_local_ts; // this will act as a generation count
#else
		(void)rate;
		(void)base_local_ts;
		(void)base_remote_ts;
#endif /* __arm64__ */
	}
}
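
/*
 * Illustrative reader-side sketch (not part of the original file):
 * base_local_ts doubles as the generation count for bt_params, mirroring
 * commpage_set_timestamp() above, so a reader retries until it sees the same
 * nonzero value on both sides of its field loads. A minimal sketch, assuming
 * a user mapping `paramsp` and arm64 barriers:
 */
#if 0 /* userspace illustration only */
static struct bt_params
read_remotetime_params(volatile struct bt_params *paramsp)
{
	struct bt_params snap;

	do {
		snap.base_local_ts = paramsp->base_local_ts; /* 0 => update in flight */
		__asm__ volatile ("dmb ishld" ::: "memory");
		snap.rate = paramsp->rate;
		snap.base_remote_ts = paramsp->base_remote_ts;
		__asm__ volatile ("dmb ishld" ::: "memory");
	} while (snap.base_local_ts == 0 ||
	    snap.base_local_ts != paramsp->base_local_ts);

	return snap;
}
#endif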

/*
 * After this counter has incremented, all running CPUs are guaranteed to
 * have quiesced, i.e. executed serially dependent memory barriers.
 * This is only tracked for CPUs running in userspace, therefore only useful
 * outside the kernel.
 *
 * Note that you can't know which side of those barriers your read was from,
 * so you have to observe 2 increments in order to ensure that you saw a
 * serially dependent barrier chain across all running CPUs.
 */
uint64_t
commpage_increment_cpu_quiescent_counter(void)
{
	if (!commPagePtr) {
		return 0;
	}

	uint64_t old_gen;

	_Atomic uint64_t *sched_gen = (_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER +
	    _COMM_PAGE_RW_OFFSET);
	/*
	 * On 32-bit architectures, double-wide atomic loads or stores are a CAS,
	 * so the atomic increment is the most efficient way to increment the
	 * counter.
	 *
	 * On 64-bit architectures, however, because the update is synchronized by
	 * the cpu mask, relaxed loads and stores are more efficient.
	 */
#if __LP64__
	old_gen = os_atomic_load(sched_gen, relaxed);
	os_atomic_store(sched_gen, old_gen + 1, relaxed);
#else
	old_gen = atomic_fetch_add_explicit(sched_gen, 1, memory_order_relaxed);
#endif
	return old_gen;
}
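
/*
 * Illustrative sketch (not part of the original file): per the comment above,
 * a userspace observer that needs every running CPU to have passed a barrier
 * reads the counter and then waits until it has advanced by 2, since the
 * first increment it sees may already have been in flight. A minimal sketch,
 * assuming a user mapping `counterp` of the counter:
 */
#if 0 /* userspace illustration only */
static void
wait_for_quiescence(volatile _Atomic uint64_t *counterp)
{
	uint64_t start = atomic_load_explicit(counterp, memory_order_relaxed);

	while (atomic_load_explicit(counterp, memory_order_relaxed) < start + 2) {
		; /* spin or yield; two increments bound a full barrier chain */
	}
}
#endif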

/*
 * Update the commpage with whether dtrace userland probes are enabled.
 */
void
commpage_update_dof(boolean_t enabled)
{
#if CONFIG_DTRACE
	*((uint8_t*)(_COMM_PAGE_DTRACE_DOF_ENABLED + _COMM_PAGE_RW_OFFSET)) = (enabled ? 1 : 0);
#else
	(void)enabled;
#endif
}

/*
 * Update the dyld global config flags.
 */
void
commpage_update_dyld_flags(uint64_t value)
{
	*((uint64_t*)(_COMM_PAGE_DYLD_SYSTEM_FLAGS + _COMM_PAGE_RW_OFFSET)) = value;
}