/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *  File:    arm/commpage/commpage.c
 *  Purpose: Set up and export a RO/RW page
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <ipc/ipc_port.h>
#include <arm/cpuid.h>          /* for cpuid_info() & cache_info() */
#include <arm/rtclock.h>
#include <libkern/OSAtomic.h>
#include <stdatomic.h>

#include <sys/kdebug.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

static void commpage_init_cpu_capabilities( void );
static int commpage_cpus( void );

vm_address_t commPagePtr = 0;
vm_address_t sharedpage_rw_addr = 0;
uint32_t _cpu_capabilities = 0;

extern int gARMv81Atomics;      /* For sysctl access from BSD side */

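/*
 * Populate the commpage at boot: map the shared page, stamp the layout
 * version, then record CPU capabilities, cache line size, CPU counts,
 * memory size, page shifts and timebase data for userspace consumers.
 */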
void
commpage_populate(
    void)
{
    uint16_t c2;
    int cpufamily;

    sharedpage_rw_addr = pmap_create_sharedpage();
    commPagePtr = (vm_address_t)_COMM_PAGE_BASE_ADDRESS;

    *((uint16_t*)(_COMM_PAGE_VERSION+_COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

    commpage_init_cpu_capabilities();
    commpage_set_timestamp(0, 0, 0, 0, 0);

    if (_cpu_capabilities & kCache32)
        c2 = 32;
    else if (_cpu_capabilities & kCache64)
        c2 = 64;
    else if (_cpu_capabilities & kCache128)
        c2 = 128;
    else
        c2 = 0;

    *((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE+_COMM_PAGE_RW_OFFSET)) = c2;
    *((uint32_t*)(_COMM_PAGE_SPIN_COUNT+_COMM_PAGE_RW_OFFSET)) = 1;

    commpage_update_active_cpus();
    cpufamily = cpuid_get_cpufamily();

    /* machine_info valid after ml_get_max_cpus() */
    *((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS+_COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
    *((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS+_COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
    *((uint64_t*)(_COMM_PAGE_MEMORY_SIZE+_COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
    *((uint32_t*)(_COMM_PAGE_CPUFAMILY+_COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
    *((uint32_t*)(_COMM_PAGE_DEV_FIRM+_COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
    *((uint8_t*)(_COMM_PAGE_USER_TIMEBASE+_COMM_PAGE_RW_OFFSET)) = user_timebase_allowed();
    *((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK+_COMM_PAGE_RW_OFFSET)) = user_cont_hwclock_allowed();
    *((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT+_COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
    *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32+_COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
    *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64+_COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
    /* enforce 16KB alignment for watch targets with new ABI */
    *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32+_COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
    *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64+_COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#else /* __arm64__ */
    *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32+_COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
    *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64+_COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT;
#endif /* __arm64__ */

    commpage_update_timebase();
    commpage_update_mach_continuous_time(0);

    clock_sec_t secs;
    clock_usec_t microsecs;
    clock_get_boottime_microtime(&secs, &microsecs);
    commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

    /*
     * Set commpage approximate time to zero for initialization.
     * The scheduler shall populate the correct value before running a user thread.
     */
    *((uint64_t *)(_COMM_PAGE_APPROX_TIME+_COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
    *((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED+_COMM_PAGE_RW_OFFSET)) = 1;
#else
    *((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED+_COMM_PAGE_RW_OFFSET)) = 0;
#endif

    commpage_update_kdebug_state();

#if CONFIG_ATM
    commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif

}

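/*
 * Describes a magic-number multiply (multiplier, add indicator, shift amount)
 * of the kind used for fast division by a constant; not referenced elsewhere
 * in this file.
 */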
struct mu {
    uint64_t m;     // magic number
    int32_t a;      // add indicator
    int32_t s;      // shift amount
};

void
commpage_set_timestamp(
    uint64_t tbr,
    uint64_t secs,
    uint64_t frac,
    uint64_t scale,
    uint64_t tick_per_sec)
{
    new_commpage_timeofday_data_t *commpage_timeofday_datap;

    if (commPagePtr == 0)
        return;

    commpage_timeofday_datap = (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA+_COMM_PAGE_RW_OFFSET);

    commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

#if (__ARM_ARCH__ >= 7)
    __asm__ volatile("dmb ish");
#endif
    commpage_timeofday_datap->TimeStamp_sec = secs;
    commpage_timeofday_datap->TimeStamp_frac = frac;
    commpage_timeofday_datap->Ticks_scale = scale;
    commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

#if (__ARM_ARCH__ >= 7)
    __asm__ volatile("dmb ish");
#endif
    commpage_timeofday_datap->TimeStamp_tick = tbr;
}
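
/*
 * The update sequence above (zero TimeStamp_tick, barrier, write the
 * remaining fields, barrier, publish the new tick) lets userspace read the
 * structure without taking a lock.  A minimal sketch of a matching reader,
 * assuming a hypothetical user-mode helper rather than the actual Libc code:
 *
 *     do {
 *         tick = datap->TimeStamp_tick;   // 0 means an update is in progress
 *         sec  = datap->TimeStamp_sec;
 *         frac = datap->TimeStamp_frac;
 *         // read barrier, then re-check the tick; retry if it changed
 *     } while (tick == 0 || tick != datap->TimeStamp_tick);
 */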

/*
 * Update _COMM_PAGE_MEMORY_PRESSURE. Called periodically from vm's compute_memory_pressure()
 */

void
commpage_set_memory_pressure(
    unsigned int pressure )
{
    if (commPagePtr == 0)
        return;
    *((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE+_COMM_PAGE_RW_OFFSET)) = pressure;
}

/*
 * Update _COMM_PAGE_SPIN_COUNT. We might want to reduce when running on a battery, etc.
 */

void
commpage_set_spin_count(
    unsigned int count )
{
    if (count == 0)     /* we test for 0 after decrement, not before */
        count = 1;

    if (commPagePtr == 0)
        return;
    *((uint32_t *)(_COMM_PAGE_SPIN_COUNT+_COMM_PAGE_RW_OFFSET)) = count;
}

/*
 * Determine number of CPUs on this system.
 */
static int
commpage_cpus( void )
{
    int cpus;

    cpus = ml_get_max_cpus();   // NB: this call can block

    if (cpus == 0)
        panic("commpage cpus==0");
    if (cpus > 0xFF)
        cpus = 0xFF;

    return cpus;
}

/*
 * Initialize _cpu_capabilities vector
 */
static void
commpage_init_cpu_capabilities( void )
{
    uint32_t bits;
    int cpus;
    ml_cpu_info_t cpu_info;

    bits = 0;
    ml_cpu_get_info(&cpu_info);

    switch (cpu_info.cache_line_size) {
    case 128:
        bits |= kCache128;
        break;
    case 64:
        bits |= kCache64;
        break;
    case 32:
        bits |= kCache32;
        break;
    default:
        break;
    }
    cpus = commpage_cpus();

    if (cpus == 1)
        bits |= kUP;

    bits |= (cpus << kNumCPUsShift);

    bits |= kFastThreadLocalStorage;    // TPIDRURO for TLS

#if __ARM_VFP__
    bits |= kHasVfp;
    arm_mvfp_info_t *mvfp_info = arm_mvfp_info();
    if (mvfp_info->neon)
        bits |= kHasNeon;
    if (mvfp_info->neon_hpfp)
        bits |= kHasNeonHPFP;
#endif
#if defined(__arm64__)
    bits |= kHasFMA;
#endif
#if __ARM_ENABLE_WFE_
#ifdef __arm64__
    if (arm64_wfe_allowed()) {
        bits |= kHasEvent;
    }
#else
    bits |= kHasEvent;
#endif
#endif
#if __ARM_V8_CRYPTO_EXTENSIONS__
    bits |= kHasARMv8Crypto;
#endif
#ifdef __arm64__
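    /*
     * Detect ARMv8.1 LSE atomics by checking the atomic instructions field
     * of the ID_AA64ISAR0_EL1 feature register.
     */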
    if ((__builtin_arm_rsr64("ID_AA64ISAR0_EL1") & ID_AA64ISAR0_EL1_ATOMIC_MASK) == ID_AA64ISAR0_EL1_ATOMIC_8_1) {
        bits |= kHasARMv81Atomics;
        gARMv81Atomics = 1;
    }
#endif
    _cpu_capabilities = bits;

    *((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES+_COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;
}
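
/*
 * Userspace can read the published capability word directly from the
 * commpage.  A minimal sketch, assuming the _COMM_PAGE_CPU_CAPABILITIES
 * address and kHasNeon bit from <machine/cpu_capabilities.h>:
 *
 *     uint32_t caps = *(volatile uint32_t *)_COMM_PAGE_CPU_CAPABILITIES;
 *     if (caps & kHasNeon) {
 *         // take the NEON code path
 *     }
 */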

/*
 * Updated every time a logical CPU goes offline/online
 */
void
commpage_update_active_cpus(void)
{
    if (!commPagePtr)
        return;
    *((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS+_COMM_PAGE_RW_OFFSET)) = processor_avail_count;
}

/*
 * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
 */
void
commpage_update_timebase(void)
{
    if (commPagePtr) {
        *((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET+_COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
    }
}

/*
 * Update the commpage with current kdebug state. This currently has bits for
 * global trace state, and typefilter enablement. It is likely additional state
 * will be tracked in the future.
 *
 * INVARIANT: This value will always be 0 if global tracing is disabled. This
 * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }"
 */
void
commpage_update_kdebug_state(void)
{
    if (commPagePtr)
        *((volatile uint32_t*)(_COMM_PAGE_KDEBUG_ENABLE+_COMM_PAGE_RW_OFFSET)) = kdebug_commpage_state();
}

/* Ditto for atm_diagnostic_config */
void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
{
    if (commPagePtr)
        *((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG+_COMM_PAGE_RW_OFFSET)) = diagnostic_config;
}

/*
 * Update the commpage data with the state of multiuser mode for
 * this device, allowing various services in userspace to avoid
 * IPC in the (more common) non-multiuser environment.
 */
void
commpage_update_multiuser_config(uint32_t multiuser_config)
{
    if (commPagePtr)
        *((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG+_COMM_PAGE_RW_OFFSET)) = multiuser_config;
}

/*
 * Update the commpage data for the
 * last known value of mach_absolute_time()
 */

void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
    uintptr_t approx_time_base = (uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);
    uint64_t saved_data;

    if (commPagePtr) {
        saved_data = atomic_load_explicit((_Atomic uint64_t *)approx_time_base,
                memory_order_relaxed);
        if (saved_data < abstime) {
            /* ignoring the success/fail return value assuming that
             * if the value has been updated since we last read it,
             * "someone" has a newer timestamp than us and ours is
             * now invalid. */
            atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)approx_time_base,
                    &saved_data, abstime, memory_order_relaxed, memory_order_relaxed);
        }
    }
#else
#pragma unused (abstime)
#endif
}

/*
 * Update the commpage data's total system sleep time for
 * userspace calls to mach_continuous_time()
 */
void
commpage_update_mach_continuous_time(uint64_t sleeptime)
{
    if (commPagePtr) {
#ifdef __arm64__
        *((uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = sleeptime;
#else
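        /*
         * A plain 64-bit store is not guaranteed to be single-copy atomic
         * on 32-bit ARM, so publish the value via a compare-and-swap loop.
         */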
        uint64_t *c_time_base = (uint64_t *)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
        uint64_t old;
        do {
            old = *c_time_base;
        } while (!OSCompareAndSwap64(old, sleeptime, c_time_base));
#endif /* __arm64__ */
    }
}

/*
 * Update the commpage's value for the boot time
 */
void
commpage_update_boottime(uint64_t value)
{
    if (commPagePtr) {
#ifdef __arm64__
        *((uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET)) = value;
#else
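        /* Same 64-bit-store workaround as in commpage_update_mach_continuous_time above. */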
        uint64_t *cp = (uint64_t *)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
        uint64_t old_value;
        do {
            old_value = *cp;
        } while (!OSCompareAndSwap64(old_value, value, cp));
#endif /* __arm64__ */
    }
}