/* apple/xnu — osfmk/arm/commpage/commpage.c (blob 27cfadba928380bed85679e89538c620c731d0aa) */
1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 *
5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the License
11 * may not be used to create, or enable the creation or redistribution of,
12 * unlawful or unlicensed copies of an Apple operating system, or to
13 * circumvent, violate, or enable the circumvention or violation of, any
14 * terms of an Apple operating system software license agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 *
19 * The Original Code and all software distributed under the License are
20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24 * Please see the License for the specific language governing rights and
25 * limitations under the License.
26 *
27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 */
29 /*
30 * @OSF_COPYRIGHT@
31 */
32 /*
33 * @APPLE_FREE_COPYRIGHT@
34 */
35 /*
36 * File: arm/commpage/commpage.c
37 * Purpose: Set up and export a RO/RW page
38 */
39 #include <mach/mach_types.h>
40 #include <mach/machine.h>
41 #include <mach/vm_map.h>
42 #include <machine/cpu_capabilities.h>
43 #include <machine/commpage.h>
44 #include <machine/pmap.h>
45 #include <vm/vm_kern.h>
46 #include <vm/vm_map.h>
47 #include <vm/vm_protos.h>
48 #include <ipc/ipc_port.h>
49 #include <arm/cpuid.h> /* for cpuid_info() & cache_info() */
50 #include <arm/rtclock.h>
51 #include <libkern/OSAtomic.h>
52 #include <stdatomic.h>
53 #include <kern/remote_time.h>
54 #include <machine/machine_remote_time.h>
55
56 #include <sys/kdebug.h>
57
58 #if CONFIG_ATM
59 #include <atm/atm_internal.h>
60 #endif
61
62 static void commpage_init_cpu_capabilities( void );
63 static int commpage_cpus( void );
64
65 vm_address_t commPagePtr=0;
66 vm_address_t sharedpage_rw_addr = 0;
67 uint32_t _cpu_capabilities = 0;
68
69 extern int gARMv81Atomics; /* For sysctl access from BSD side */
70
/*
 * commpage_populate
 *
 * Called once at startup: creates the shared comm page(s), then fills in
 * the fields userspace reads (version, CPU capabilities, cache line size,
 * CPU counts, page shifts, timebase, boottime, ...).  Runs before any user
 * thread exists, so plain stores are sufficient here.
 */
void
commpage_populate(
	void)
{
	uint16_t c2;		/* cache line size, in bytes, advertised to userspace */
	int cpufamily;

	/* Create the shared page and remember the kernel's writable alias to it. */
	sharedpage_rw_addr = pmap_create_sharedpage();
	commPagePtr = (vm_address_t)_COMM_PAGE_BASE_ADDRESS;

	*((uint16_t*)(_COMM_PAGE_VERSION+_COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

	/* Must run before the kCache* tests below: it sets _cpu_capabilities. */
	commpage_init_cpu_capabilities();
	/* All-zero timestamp marks the timeofday data as not-yet-valid. */
	commpage_set_timestamp(0, 0, 0, 0, 0);

	/* Decode the cache line size back out of the capability bits. */
	if (_cpu_capabilities & kCache32)
		c2 = 32;
	else if (_cpu_capabilities & kCache64)
		c2 = 64;
	else if (_cpu_capabilities & kCache128)
		c2 = 128;
	else
		c2 = 0;	/* unknown line size */

	*((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE+_COMM_PAGE_RW_OFFSET)) = c2;
	*((uint32_t*)(_COMM_PAGE_SPIN_COUNT+_COMM_PAGE_RW_OFFSET)) = 1;

	commpage_update_active_cpus();
	cpufamily = cpuid_get_cpufamily();

	/* machine_info valid after ml_get_max_cpus() (called via commpage_init_cpu_capabilities above) */
	*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS+_COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
	*((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS+_COMM_PAGE_RW_OFFSET))= (uint8_t) machine_info.logical_cpu_max;
	*((uint64_t*)(_COMM_PAGE_MEMORY_SIZE+_COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
	*((uint32_t*)(_COMM_PAGE_CPUFAMILY+_COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM+_COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint8_t*)(_COMM_PAGE_USER_TIMEBASE+_COMM_PAGE_RW_OFFSET)) = user_timebase_allowed();
	*((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK+_COMM_PAGE_RW_OFFSET)) = user_cont_hwclock_allowed();
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT+_COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32+_COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64+_COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
	/* enforce 16KB alignment for watch targets with new ABI */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32+_COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64+_COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#else /* 32-bit ARM, non-watch ABI */
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32+_COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64+_COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
#endif /* __arm64__ */

	commpage_update_timebase();
	commpage_update_mach_continuous_time(0);

	/* Publish boot time in microseconds. */
	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

	/*
	 * set commpage approximate time to zero for initialization.
	 * scheduler shall populate correct value before running user thread
	 */
	*((uint64_t *)(_COMM_PAGE_APPROX_TIME+ _COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED+_COMM_PAGE_RW_OFFSET)) = 1;
#else
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED+_COMM_PAGE_RW_OFFSET)) = 0;
#endif

	commpage_update_kdebug_state();

#if CONFIG_ATM
	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif

}
149
/*
 * Parameters for magic-number multiply/shift division.
 * NOTE(review): not referenced anywhere in this file's visible code —
 * presumably consumed by commpage assembly routines; confirm before removing.
 */
struct mu {
	uint64_t m;	// magic number (multiplier)
	int32_t a;	// add indicator (apply add-back correction)
	int32_t s;	// shift amount
};
155
/*
 * Publish new timeofday data to the commpage.
 *
 * Uses a seqlock-like protocol: TimeStamp_tick doubles as a validity flag.
 * It is zeroed first, the payload fields are written, and only then is the
 * real tick value stored — with DMB barriers on either side of the payload
 * so a userspace reader that observes a nonzero tick is guaranteed to see
 * consistent payload fields.  Statement order here is load-bearing.
 */
void
commpage_set_timestamp(
	uint64_t tbr,
	uint64_t secs,
	uint64_t frac,
	uint64_t scale,
	uint64_t tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	/* Nothing to publish before commpage_populate() has run. */
	if (commPagePtr == 0)
		return;

	commpage_timeofday_datap = (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA+_COMM_PAGE_RW_OFFSET);

	/* Invalidate: readers treat tick == 0 as "data in flux". */
	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

#if (__ARM_ARCH__ >= 7)
	/* Order the invalidation before the payload writes. */
	__asm__ volatile("dmb ish");
#endif
	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

#if (__ARM_ARCH__ >= 7)
	/* Order the payload writes before re-validation. */
	__asm__ volatile("dmb ish");
#endif
	/* Re-validate with the real timestamp. */
	commpage_timeofday_datap->TimeStamp_tick = tbr;
}
186
187 /*
188 * Update _COMM_PAGE_MEMORY_PRESSURE. Called periodically from vm's compute_memory_pressure()
189 */
190
191 void
192 commpage_set_memory_pressure(
193 unsigned int pressure )
194 {
195 if (commPagePtr == 0)
196 return;
197 *((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE+_COMM_PAGE_RW_OFFSET)) = pressure;
198 }
199
200 /*
201 * Update _COMM_PAGE_SPIN_COUNT. We might want to reduce when running on a battery, etc.
202 */
203
204 void
205 commpage_set_spin_count(
206 unsigned int count )
207 {
208 if (count == 0) /* we test for 0 after decrement, not before */
209 count = 1;
210
211 if (commPagePtr == 0)
212 return;
213 *((uint32_t *)(_COMM_PAGE_SPIN_COUNT+_COMM_PAGE_RW_OFFSET)) = count;
214 }
215
/*
 * Determine number of CPUs on this system, clamped to what the commpage's
 * one-byte CPU-count fields can hold.
 */
static int
commpage_cpus( void )
{
	int ncpus = ml_get_max_cpus();	// NB: this call can block

	if (ncpus == 0) {
		panic("commpage cpus==0");
	}

	/* The commpage stores the count in a single byte. */
	return (ncpus > 0xFF) ? 0xFF : ncpus;
}
233
/*
 * Initialize the _cpu_capabilities vector from cpu/cache info and publish
 * it to the commpage.  Bits cover: cache line size, CPU count, TLS support,
 * VFP/NEON, FMA, WFE, ARMv8 crypto, and ARMv8.1 atomics.
 */
static void
commpage_init_cpu_capabilities( void )
{
	uint32_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	/* Encode cache line size as a capability flag (decoded in commpage_populate). */
	switch (cpu_info.cache_line_size) {
	case 128:
		bits |= kCache128;
		break;
	case 64:
		bits |= kCache64;
		break;
	case 32:
		bits |= kCache32;
		break;
	default:
		/* unknown line size: leave no kCache* bit set */
		break;
	}
	cpus = commpage_cpus();		/* NOTE: can block (ml_get_max_cpus) */

	if (cpus == 1)
		bits |= kUP;		/* uniprocessor */

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;	// TPIDRURO for TLS

#if __ARM_VFP__
	bits |= kHasVfp;
	arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
	if (mvfp_info->neon)
		bits |= kHasNeon;
	if (mvfp_info->neon_hpfp)
		bits |= kHasNeonHPFP;
#endif
#if defined(__arm64__)
	bits |= kHasFMA;
#endif
#if __ARM_ENABLE_WFE_
#ifdef __arm64__
	/* arm64 gates WFE advertisement on a runtime check. */
	if (arm64_wfe_allowed()) {
		bits |= kHasEvent;
	}
#else
	bits |= kHasEvent;
#endif
#endif
#if __ARM_V8_CRYPTO_EXTENSIONS__
	bits |= kHasARMv8Crypto;
#endif
#ifdef __arm64__
	/* Detect ARMv8.1 LSE atomics via ID_AA64ISAR0_EL1. */
	if ((__builtin_arm_rsr64("ID_AA64ISAR0_EL1") & ID_AA64ISAR0_EL1_ATOMIC_MASK) == ID_AA64ISAR0_EL1_ATOMIC_8_1) {
		bits |= kHasARMv81Atomics;
		gARMv81Atomics = 1;	/* exported to BSD sysctl */
	}
#endif
	_cpu_capabilities = bits;

	/* Publish to userspace. */
	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES+_COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;
}
302
303 /*
304 * Updated every time a logical CPU goes offline/online
305 */
306 void
307 commpage_update_active_cpus(void)
308 {
309 if (!commPagePtr)
310 return;
311 *((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS+_COMM_PAGE_RW_OFFSET)) = processor_avail_count;
312 }
313
314 /*
315 * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
316 */
317 void
318 commpage_update_timebase(void)
319 {
320 if (commPagePtr) {
321 *((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET+_COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
322 }
323 }
324
325 /*
326 * Update the commpage with current kdebug state. This currently has bits for
327 * global trace state, and typefilter enablement. It is likely additional state
328 * will be tracked in the future.
329 *
330 * INVARIANT: This value will always be 0 if global tracing is disabled. This
331 * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }"
332 */
333 void
334 commpage_update_kdebug_state(void)
335 {
336 if (commPagePtr)
337 *((volatile uint32_t*)(_COMM_PAGE_KDEBUG_ENABLE+_COMM_PAGE_RW_OFFSET)) = kdebug_commpage_state();
338 }
339
340 /* Ditto for atm_diagnostic_config */
341 void
342 commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
343 {
344 if (commPagePtr)
345 *((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG+_COMM_PAGE_RW_OFFSET)) = diagnostic_config;
346 }
347
348 /*
349 * Update the commpage data with the state of multiuser mode for
350 * this device. Allowing various services in userspace to avoid
351 * IPC in the (more common) non-multiuser environment.
352 */
353 void
354 commpage_update_multiuser_config(uint32_t multiuser_config)
355 {
356 if (commPagePtr)
357 *((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG+_COMM_PAGE_RW_OFFSET)) = multiuser_config;
358 }
359
/*
 * Update the commpage's last known value of mach_absolute_time() for
 * approximate-time queries.  Multiple CPUs may race here; the CAS keeps the
 * published value monotonically non-decreasing (take the max).
 */
void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	uintptr_t approx_time_base = (uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);
	uint64_t saved_data;

	if (commPagePtr) {
		/* relaxed ordering: only the value itself matters, not ordering
		 * with surrounding stores */
		saved_data = atomic_load_explicit((_Atomic uint64_t *)approx_time_base,
				memory_order_relaxed);
		/* only move the published timestamp forward, never backward */
		if (saved_data < abstime) {
			/* ignoring the success/fail return value assuming that
			 * if the value has been updated since we last read it,
			 * "someone" has a newer timestamp than us and ours is
			 * now invalid. */
			atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)approx_time_base,
					&saved_data, abstime, memory_order_relaxed, memory_order_relaxed);
		}
	}
#else
#pragma unused (abstime)
#endif
}
388
389 /*
390 * update the commpage data's total system sleep time for
391 * userspace call to mach_continuous_time()
392 */
393 void
394 commpage_update_mach_continuous_time(uint64_t sleeptime)
395 {
396 if (commPagePtr) {
397 #ifdef __arm64__
398 *((uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = sleeptime;
399 #else
400 uint64_t *c_time_base = (uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
401 uint64_t old;
402 do {
403 old = *c_time_base;
404 } while(!OSCompareAndSwap64(old, sleeptime, c_time_base));
405 #endif /* __arm64__ */
406 }
407 }
408
409 /*
410 * update the commpage's value for the boot time
411 */
412 void
413 commpage_update_boottime(uint64_t value)
414 {
415 if (commPagePtr) {
416 #ifdef __arm64__
417 *((uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET)) = value;
418 #else
419 uint64_t *cp = (uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
420 uint64_t old_value;
421 do {
422 old_value = *cp;
423 } while (!OSCompareAndSwap64(old_value, value, cp));
424 #endif /* __arm64__ */
425 }
426 }
427
/*
 * Set the commpage's remote time params for userspace calls to
 * mach_bridge_remote_time().
 *
 * base_local_ts doubles as a generation count (same protocol as
 * commpage_set_timestamp): it is zeroed, the payload is written, and it is
 * restored last, with DMB barriers so a reader seeing a nonzero
 * base_local_ts sees a consistent rate/base_remote_ts pair.
 */
void
commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
{
	if (commPagePtr) {
#ifdef __arm64__
		struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
		/* Invalidate: readers treat base_local_ts == 0 as "in flux". */
		paramsp->base_local_ts = 0;
		__asm__ volatile("dmb ish" ::: "memory");
		paramsp->rate = rate;
		paramsp->base_remote_ts = base_remote_ts;
		__asm__ volatile("dmb ish" ::: "memory");
		paramsp->base_local_ts = base_local_ts; //This will act as a generation count
#else
		/* Not supported on 32-bit ARM; parameters intentionally unused. */
		(void)rate;
		(void)base_local_ts;
		(void)base_remote_ts;
#endif /* __arm64__ */
	}
}