/*
 * Copyright (c) 2003-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Here's what to do if you want to add a new routine to the comm page:
 *
 * 1. Add a definition for its address in osfmk/i386/cpu_capabilities.h,
 *    being careful to reserve room for future expansion.
 *
 * 2. Write one or more versions of the routine, each with its own
 *    commpage_descriptor. The tricky part is getting the "special",
 *    "musthave", and "canthave" fields right, so that exactly one
 *    version of the routine is selected for every machine.
 *    The source files should be in osfmk/i386/commpage/.
 *    (An illustrative sketch of such a descriptor follows this comment.)
 *
 * 3. Add a ptr to your new commpage_descriptor(s) in the "routines"
 *    array in osfmk/i386/commpage/commpage_asm.s. There are two
 *    arrays, one for the 32-bit and one for the 64-bit commpage.
 *
 * 4. Write the code in Libc to use the new routine.
 */
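/*
 * Illustrative only: a minimal sketch of the descriptor in step 2 above,
 * limited to the fields this file itself dereferences (code_address,
 * code_length, commpage_address). The routine bounds nop_begin/nop_end and
 * the slot constant _COMM_PAGE_NOP are hypothetical; real descriptors are
 * emitted by macros in osfmk/i386/commpage/commpage_asm.s.
 *
 *	extern char nop_begin[], nop_end[];	// hypothetical code bounds
 *
 *	commpage_descriptor nop_descriptor = {
 *		.code_address     = nop_begin,		// code to copy into the commpage
 *		.code_length      = 4,			// i.e. nop_end - nop_begin, in bytes
 *		.commpage_address = _COMM_PAGE_NOP,	// hypothetical user-visible slot
 *	};
 */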

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <i386/cpuid.h>
#include <i386/tsc.h>
#include <i386/rtclock_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/misc_protos.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <ipc/ipc_port.h>

#include <kern/page_decrypt.h>
#include <kern/processor.h>

#include <sys/kdebug.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

/* the lists of commpage routines are in commpage_asm.s */
extern commpage_descriptor* commpage_32_routines[];
extern commpage_descriptor* commpage_64_routines[];

extern vm_map_t commpage32_map;		// the shared submap, set up in vm init
extern vm_map_t commpage64_map;		// the shared submap, set up in vm init
extern vm_map_t commpage_text32_map;	// the shared submap, set up in vm init
extern vm_map_t commpage_text64_map;	// the shared submap, set up in vm init


char *commPagePtr32 = NULL;		// virtual addr in kernel map of 32-bit commpage
char *commPagePtr64 = NULL;		// ...and of 64-bit commpage
char *commPageTextPtr32 = NULL;		// virtual addr in kernel map of 32-bit commpage text
char *commPageTextPtr64 = NULL;		// ...and of 64-bit commpage text

uint64_t _cpu_capabilities = 0;		// define the capability vector

typedef uint32_t commpage_address_t;

static commpage_address_t next;		// next available address in comm page

static char *commPagePtr;		// virtual addr in kernel map of commpage we are working on
static commpage_address_t commPageBaseOffset;	// subtract from 32-bit runtime address to get offset in virtual commpage in kernel map

static commpage_time_data *time_data32 = NULL;
static commpage_time_data *time_data64 = NULL;

decl_simple_lock_data(static, commpage_active_cpus_lock);

/* Allocate the commpage and add to the shared submap created by vm:
 * 1. allocate a page in the kernel map (RW)
 * 2. wire it down
 * 3. make a memory entry out of it
 * 4. map that entry into the shared comm region map (R-only)
 */

static void*
commpage_allocate(
	vm_map_t	submap,			// commpage32_map or commpage64_map
	size_t		area_used,		// _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
	vm_prot_t	uperm)
{
	vm_offset_t	kernel_addr = 0;	// address of commpage in kernel map
	vm_offset_t	zero = 0;
	vm_size_t	size = area_used;	// size actually populated
	vm_map_entry_t	entry;
	ipc_port_t	handle;
	kern_return_t	kr;

	if (submap == NULL)
		panic("commpage submap is null");

	if ((kr = vm_map(kernel_map,
			 &kernel_addr,
			 area_used,
			 0,
			 VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK),
			 NULL,
			 0,
			 FALSE,
			 VM_PROT_ALL,
			 VM_PROT_ALL,
			 VM_INHERIT_NONE)))
		panic("cannot allocate commpage %d", kr);

	if ((kr = vm_map_wire(kernel_map,
			      kernel_addr,
			      kernel_addr + area_used,
			      VM_PROT_DEFAULT|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
			      FALSE)))
		panic("cannot wire commpage: %d", kr);

	/*
	 * Now that the object is created and wired into the kernel map, mark it so that no delay
	 * copy-on-write will ever be performed on it as a result of mapping it into user-space.
	 * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and
	 * that would be a real disaster.
	 *
	 * JMM - What we really need is a way to create it like this in the first place.
	 */
	if (!vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr, VM_MAP_PAGE_MASK(kernel_map)), &entry) || entry->is_sub_map)
		panic("cannot find commpage entry %d", kr);
	VME_OBJECT(entry)->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	if ((kr = mach_make_memory_entry( kernel_map,	// target map
				&size,		// size
				kernel_addr,	// offset (address in kernel map)
				uperm,		// protections as specified
				&handle,	// this is the object handle we get
				NULL )))	// parent_entry (what is this?)
		panic("cannot make entry for commpage %d", kr);

	if ((kr = vm_map_64(	submap,			// target map (shared submap)
				&zero,			// address (map into 1st page in submap)
				area_used,		// size
				0,			// mask
				VM_FLAGS_FIXED,		// flags (it must be 1st page in submap)
				handle,			// port is the memory entry we just made
				0,			// offset (map 1st page in memory entry)
				FALSE,			// copy
				uperm,			// cur_protection (R-only in user map)
				uperm,			// max_protection
				VM_INHERIT_SHARE )))	// inheritance
		panic("cannot map commpage %d", kr);

	ipc_port_release(handle);
	/* Make the kernel mapping non-executable. This cannot be done
	 * at the time of map entry creation as mach_make_memory_entry
	 * cannot handle disjoint permissions at this time.
	 */
	kr = vm_protect(kernel_map, kernel_addr, area_used, FALSE, VM_PROT_READ | VM_PROT_WRITE);
	assert(kr == KERN_SUCCESS);

	return (void*)(intptr_t)kernel_addr;	// return address in kernel map
}

/* Get address (in kernel map) of a commpage field. */

static void*
commpage_addr_of(
	commpage_address_t addr_at_runtime )
{
	return (void*) ((uintptr_t)commPagePtr + (addr_at_runtime - commPageBaseOffset));
}

/* Determine number of CPUs on this system. We cannot rely on
 * machine_info.max_cpus this early in the boot.
 */
static int
commpage_cpus( void )
{
	int cpus;

	cpus = ml_get_max_cpus();	// NB: this call can block

	if (cpus == 0)
		panic("commpage cpus==0");
	if (cpus > 0xFF)
		cpus = 0xFF;

	return cpus;
}

/* Initialize kernel version of _cpu_capabilities vector (used by KEXTs.) */

static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	switch (cpu_info.vector_unit) {
		case 9:
			bits |= kHasAVX1_0;
			/* fall thru */
		case 8:
			bits |= kHasSSE4_2;
			/* fall thru */
		case 7:
			bits |= kHasSSE4_1;
			/* fall thru */
		case 6:
			bits |= kHasSupplementalSSE3;
			/* fall thru */
		case 5:
			bits |= kHasSSE3;
			/* fall thru */
		case 4:
			bits |= kHasSSE2;
			/* fall thru */
		case 3:
			bits |= kHasSSE;
			/* fall thru */
		case 2:
			bits |= kHasMMX;
		default:
			break;
	}
	switch (cpu_info.cache_line_size) {
		case 128:
			bits |= kCache128;
			break;
		case 64:
			bits |= kCache64;
			break;
		case 32:
			bits |= kCache32;
			break;
		default:
			break;
	}
	cpus = commpage_cpus();			// how many CPUs do we have

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;	// we use %gs for TLS

#define setif(_bits, _bit, _condition) \
	if (_condition) _bits |= _bit

	setif(bits, kUP, cpus == 1);
	setif(bits, k64Bit, cpu_mode_is64bit());
	setif(bits, kSlow, tscFreq <= SLOW_TSC_THRESHOLD);

	setif(bits, kHasAES, cpuid_features() &
					CPUID_FEATURE_AES);
	setif(bits, kHasF16C, cpuid_features() &
					CPUID_FEATURE_F16C);
	setif(bits, kHasRDRAND, cpuid_features() &
					CPUID_FEATURE_RDRAND);
	setif(bits, kHasFMA, cpuid_features() &
					CPUID_FEATURE_FMA);

	setif(bits, kHasBMI1, cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI1);
	setif(bits, kHasBMI2, cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI2);
	setif(bits, kHasRTM, cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_RTM);
	setif(bits, kHasHLE, cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_HLE);
	setif(bits, kHasAVX2_0, cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_AVX2);
	setif(bits, kHasRDSEED, cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_RDSEED);	/* leaf-7 feature, so test the leaf-7 vector */
	setif(bits, kHasADX, cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_ADX);

	setif(bits, kHasMPX, cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_MPX);
	setif(bits, kHasSGX, cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_SGX);
	uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
	setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
					(cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_ERMS));

	_cpu_capabilities = bits;	// set kernel version for use by drivers etc
}

/* initialize the approx_time_supported flag and set the approx time to 0.
 * Called during initial commpage population.
 */
static void
commpage_mach_approximate_time_init(void)
{
	char *cp = commPagePtr32;
	uint8_t supported;

#ifdef CONFIG_MACH_APPROXIMATE_TIME
	supported = 1;
#else
	supported = 0;
#endif
	if ( cp ) {
		cp += (_COMM_PAGE_APPROX_TIME_SUPPORTED - _COMM_PAGE32_BASE_ADDRESS);
		*(boolean_t *)cp = supported;
	}

	cp = commPagePtr64;
	if ( cp ) {
		cp += (_COMM_PAGE_APPROX_TIME_SUPPORTED - _COMM_PAGE32_START_ADDRESS);
		*(boolean_t *)cp = supported;
	}
	commpage_update_mach_approximate_time(0);
}

static void
commpage_mach_continuous_time_init(void)
{
	commpage_update_mach_continuous_time(0);
}

static void
commpage_boottime_init(void)
{
	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);
}

uint64_t
_get_cpu_capabilities(void)
{
	return _cpu_capabilities;
}
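/*
 * Illustrative only: a KEXT or other kernel code might consult the kernel's
 * copy of the capability vector like this (the caller and both helper
 * functions are hypothetical, not part of this file):
 *
 *	if (_get_cpu_capabilities() & kHasSSE4_2)
 *		use_sse42_path();
 *	else
 *		use_generic_path();
 */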

/* Copy data into commpage. */

static void
commpage_stuff(
	commpage_address_t address,
	const void *source,
	int length )
{
	void *dest = commpage_addr_of(address);

	if (address < next)
		panic("commpage overlap at address 0x%p, 0x%x < 0x%x", dest, address, next);

	bcopy(source, dest, length);

	next = address + length;
}

/* Copy a routine into comm page if it matches running machine.
 */
static void
commpage_stuff_routine(
	commpage_descriptor *rd )
{
	commpage_stuff(rd->commpage_address, rd->code_address, rd->code_length);
}

/* Fill in the 32- or 64-bit commpage. Called once for each.
 */

static void
commpage_populate_one(
	vm_map_t submap,		// commpage32_map or commpage64_map
	char ** kernAddressPtr,		// &commPagePtr32 or &commPagePtr64
	size_t area_used,		// _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
	commpage_address_t base_offset,	// will become commPageBaseOffset
	commpage_time_data** time_data,	// &time_data32 or &time_data64
	const char* signature,		// "commpage 32-bit" or "commpage 64-bit"
	vm_prot_t uperm)
{
	uint8_t c1;
	uint16_t c2;
	int c4;
	uint64_t c8;
	uint32_t cfamily;
	short version = _COMM_PAGE_THIS_VERSION;

	next = 0;
	commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used, uperm );
	*kernAddressPtr = commPagePtr;	// save address either in commPagePtr32 or 64
	commPageBaseOffset = base_offset;

	*time_data = commpage_addr_of( _COMM_PAGE_TIME_DATA_START );

	/* Stuff in the constants. We move things into the comm page in strictly
	 * ascending order, so we can check for overlap and panic if so.
	 * Note: the 32-bit cpu_capabilities vector is retained in addition to
	 * the expanded 64-bit vector.
	 */
	commpage_stuff(_COMM_PAGE_SIGNATURE, signature, (int)MIN(_COMM_PAGE_SIGNATURELEN, strlen(signature)));
	commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES64, &_cpu_capabilities, sizeof(_cpu_capabilities));
	commpage_stuff(_COMM_PAGE_VERSION, &version, sizeof(short));
	commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES, &_cpu_capabilities, sizeof(uint32_t));

	c2 = 32;	// default
	if (_cpu_capabilities & kCache64)
		c2 = 64;
	else if (_cpu_capabilities & kCache128)
		c2 = 128;
	commpage_stuff(_COMM_PAGE_CACHE_LINESIZE, &c2, 2);

	c4 = MP_SPIN_TRIES;
	commpage_stuff(_COMM_PAGE_SPIN_COUNT, &c4, 4);

	/* machine_info valid after ml_get_max_cpus() */
	c1 = machine_info.physical_cpu_max;
	commpage_stuff(_COMM_PAGE_PHYSICAL_CPUS, &c1, 1);
	c1 = machine_info.logical_cpu_max;
	commpage_stuff(_COMM_PAGE_LOGICAL_CPUS, &c1, 1);

	c8 = ml_cpu_cache_size(0);
	commpage_stuff(_COMM_PAGE_MEMORY_SIZE, &c8, 8);

	cfamily = cpuid_info()->cpuid_cpufamily;
	commpage_stuff(_COMM_PAGE_CPUFAMILY, &cfamily, 4);

	if (next > _COMM_PAGE_END)
		panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%p", next, commPagePtr);

}


/* Fill in commpages: called once, during kernel initialization, from the
 * startup thread before user-mode code is running.
 *
 * See the top of this file for a list of what you have to do to add
 * a new routine to the commpage.
 */

void
commpage_populate( void )
{
	commpage_init_cpu_capabilities();

	commpage_populate_one(	commpage32_map,
				&commPagePtr32,
				_COMM_PAGE32_AREA_USED,
				_COMM_PAGE32_BASE_ADDRESS,
				&time_data32,
				"commpage 32-bit",
				VM_PROT_READ);
#ifndef __LP64__
	pmap_commpage32_init((vm_offset_t) commPagePtr32, _COMM_PAGE32_BASE_ADDRESS,
			   _COMM_PAGE32_AREA_USED/INTEL_PGBYTES);
#endif
	time_data64 = time_data32;	/* if no 64-bit commpage, point to 32-bit */

	if (_cpu_capabilities & k64Bit) {
		commpage_populate_one(	commpage64_map,
					&commPagePtr64,
					_COMM_PAGE64_AREA_USED,
					_COMM_PAGE32_START_ADDRESS,	/* commpage addresses are relative to 32-bit commpage placement */
					&time_data64,
					"commpage 64-bit",
					VM_PROT_READ);
#ifndef __LP64__
		pmap_commpage64_init((vm_offset_t) commPagePtr64, _COMM_PAGE64_BASE_ADDRESS,
				   _COMM_PAGE64_AREA_USED/INTEL_PGBYTES);
#endif
	}

	simple_lock_init(&commpage_active_cpus_lock, 0);

	commpage_update_active_cpus();
	commpage_mach_approximate_time_init();
	commpage_mach_continuous_time_init();
	commpage_boottime_init();
	rtc_nanotime_init_commpage();
	commpage_update_kdebug_state();
#if CONFIG_ATM
	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif
}

/* Fill in the common routines during kernel initialization.
 * This is called before user-mode code is running.
 */
void
commpage_text_populate( void )
{
	commpage_descriptor **rd;

	next = 0;
	commPagePtr = (char *) commpage_allocate(commpage_text32_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE);
	commPageTextPtr32 = commPagePtr;

	/* fill with int3 (0xCC) so a stray jump into unused space traps */
	char *cptr = commPagePtr;
	int i = 0;
	for (; i < _COMM_PAGE_TEXT_AREA_USED; i++) {
		cptr[i] = 0xCC;
	}

	commPageBaseOffset = _COMM_PAGE_TEXT_START;
	for (rd = commpage_32_routines; *rd != NULL; rd++) {
		commpage_stuff_routine(*rd);
	}

#ifndef __LP64__
	pmap_commpage32_init((vm_offset_t) commPageTextPtr32, _COMM_PAGE_TEXT_START,
			   _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES);
#endif

	if (_cpu_capabilities & k64Bit) {
		next = 0;
		commPagePtr = (char *) commpage_allocate(commpage_text64_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE);
		commPageTextPtr64 = commPagePtr;

		cptr = commPagePtr;
		for (i = 0; i < _COMM_PAGE_TEXT_AREA_USED; i++) {
			cptr[i] = 0xCC;
		}

		for (rd = commpage_64_routines; *rd != NULL; rd++) {
			commpage_stuff_routine(*rd);
		}

#ifndef __LP64__
		pmap_commpage64_init((vm_offset_t) commPageTextPtr64, _COMM_PAGE_TEXT_START,
				   _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES);
#endif
	}

	if (next > _COMM_PAGE_TEXT_END)
		panic("commpage text overflow: next=0x%08x, commPagePtr=%p", next, commPagePtr);

}

/* Update commpage nanotime information.
 *
 * This routine must be serialized by some external means, i.e., a lock.
 */

void
commpage_set_nanotime(
	uint64_t tsc_base,
	uint64_t ns_base,
	uint32_t scale,
	uint32_t shift )
{
	commpage_time_data *p32 = time_data32;
	commpage_time_data *p64 = time_data64;
	static uint32_t generation = 0;
	uint32_t next_gen;

	if (p32 == NULL)		/* have commpages been allocated yet? */
		return;

	if ( generation != p32->nt_generation )
		panic("nanotime trouble 1");	/* possibly not serialized */
	if ( ns_base < p32->nt_ns_base )
		panic("nanotime trouble 2");
	if ((shift != 0) && ((_cpu_capabilities & kSlow) == 0) )
		panic("nanotime trouble 3");

	next_gen = ++generation;
	if (next_gen == 0)
		next_gen = ++generation;

	p32->nt_generation = 0;		/* mark invalid, so commpage won't try to use it */
	p64->nt_generation = 0;

	p32->nt_tsc_base = tsc_base;
	p64->nt_tsc_base = tsc_base;

	p32->nt_ns_base = ns_base;
	p64->nt_ns_base = ns_base;

	p32->nt_scale = scale;
	p64->nt_scale = scale;

	p32->nt_shift = shift;
	p64->nt_shift = shift;

	p32->nt_generation = next_gen;	/* mark data as valid */
	p64->nt_generation = next_gen;
}
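/*
 * For context (illustrative only, not part of this file): user-mode readers
 * treat nt_generation as a sequence counter. A generation of 0 means an
 * update is in progress, and a generation that changes across a read means
 * the snapshot is torn. A sketch of the matching read discipline, with a
 * hypothetical pointer p to the user-mapped commpage_time_data:
 *
 *	uint32_t gen;
 *	do {
 *		gen = p->nt_generation;		// 0 => writer is mid-update
 *		// ...read nt_tsc_base, nt_scale, nt_shift, nt_ns_base...
 *	} while (gen == 0 || gen != p->nt_generation);
 */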


/* Disable commpage gettimeofday(), forcing commpage to call through to the kernel. */

void
commpage_disable_timestamp( void )
{
	time_data32->gtod_generation = 0;
	time_data64->gtod_generation = 0;
}


/* Update commpage gettimeofday() information. As with nanotime(), we interleave
 * updates to the 32- and 64-bit commpage, in order to keep time more nearly in sync
 * between the two environments.
 *
 * This routine must be serialized by some external means, i.e., a lock.
 */

void
commpage_set_timestamp(
	uint64_t abstime,
	uint64_t secs )
{
	commpage_time_data *p32 = time_data32;
	commpage_time_data *p64 = time_data64;
	static uint32_t generation = 0;
	uint32_t next_gen;

	next_gen = ++generation;
	if (next_gen == 0)
		next_gen = ++generation;

	p32->gtod_generation = 0;	/* mark invalid, so commpage won't try to use it */
	p64->gtod_generation = 0;

	p32->gtod_ns_base = abstime;
	p64->gtod_ns_base = abstime;

	p32->gtod_sec_base = secs;
	p64->gtod_sec_base = secs;

	p32->gtod_generation = next_gen;	/* mark data as valid */
	p64->gtod_generation = next_gen;
}


/* Update _COMM_PAGE_MEMORY_PRESSURE. Called periodically from vm's compute_memory_pressure() */

void
commpage_set_memory_pressure(
	unsigned int pressure )
{
	char *cp;
	uint32_t *ip;

	cp = commPagePtr32;
	if ( cp ) {
		cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_BASE_ADDRESS);
		ip = (uint32_t*) (void *) cp;
		*ip = (uint32_t) pressure;
	}

	cp = commPagePtr64;
	if ( cp ) {
		cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_START_ADDRESS);
		ip = (uint32_t*) (void *) cp;
		*ip = (uint32_t) pressure;
	}

}


/* Update _COMM_PAGE_SPIN_COUNT. We might want to reduce when running on a battery, etc. */

void
commpage_set_spin_count(
	unsigned int count )
{
	char *cp;
	uint32_t *ip;

	if (count == 0)		/* we test for 0 after decrement, not before */
		count = 1;

	cp = commPagePtr32;
	if ( cp ) {
		cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_BASE_ADDRESS);
		ip = (uint32_t*) (void *) cp;
		*ip = (uint32_t) count;
	}

	cp = commPagePtr64;
	if ( cp ) {
		cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_START_ADDRESS);
		ip = (uint32_t*) (void *) cp;
		*ip = (uint32_t) count;
	}

}

/* Updated every time a logical CPU goes offline/online */
void
commpage_update_active_cpus(void)
{
	char *cp;
	volatile uint8_t *ip;

	/* At least 32-bit commpage must be initialized */
	if (!commPagePtr32)
		return;

	simple_lock(&commpage_active_cpus_lock);

	cp = commPagePtr32;
	cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_BASE_ADDRESS);
	ip = (volatile uint8_t*) cp;
	*ip = (uint8_t) processor_avail_count;

	cp = commPagePtr64;
	if ( cp ) {
		cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_START_ADDRESS);
		ip = (volatile uint8_t*) cp;
		*ip = (uint8_t) processor_avail_count;
	}

	simple_unlock(&commpage_active_cpus_lock);
}

/*
 * Update the commpage with current kdebug state. This currently has bits for
 * global trace state, and typefilter enablement. It is likely additional state
 * will be tracked in the future.
 *
 * INVARIANT: This value will always be 0 if global tracing is disabled. This
 * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }"
 */
void
commpage_update_kdebug_state(void)
{
	volatile uint32_t *saved_data_ptr;
	char *cp;

	cp = commPagePtr32;
	if (cp) {
		cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_BASE_ADDRESS);
		saved_data_ptr = (volatile uint32_t *)cp;
		*saved_data_ptr = kdebug_commpage_state();
	}

	cp = commPagePtr64;
	if (cp) {
		cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_START_ADDRESS);
		saved_data_ptr = (volatile uint32_t *)cp;
		*saved_data_ptr = kdebug_commpage_state();
	}
}

/* Ditto for atm_diagnostic_config */
void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
{
	volatile uint32_t *saved_data_ptr;
	char *cp;

	cp = commPagePtr32;
	if (cp) {
		cp += (_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG - _COMM_PAGE32_BASE_ADDRESS);
		saved_data_ptr = (volatile uint32_t *)cp;
		*saved_data_ptr = diagnostic_config;
	}

	cp = commPagePtr64;
	if ( cp ) {
		cp += (_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG - _COMM_PAGE32_START_ADDRESS);
		saved_data_ptr = (volatile uint32_t *)cp;
		*saved_data_ptr = diagnostic_config;
	}
}

/*
 * update the commpage data for last known value of mach_absolute_time()
 */

void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	uint64_t saved_data;
	char *cp;

	cp = commPagePtr32;
	if ( cp ) {
		cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_BASE_ADDRESS);
		saved_data = *(uint64_t *)cp;
		if (saved_data < abstime) {
			/* ignoring the success/fail return value assuming that
			 * if the value has been updated since we last read it,
			 * "someone" has a newer timestamp than us and ours is
			 * now invalid. */
			OSCompareAndSwap64(saved_data, abstime, (uint64_t *)cp);
		}
	}
	cp = commPagePtr64;
	if ( cp ) {
		cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_START_ADDRESS);
		saved_data = *(uint64_t *)cp;
		if (saved_data < abstime) {
			/* ignoring the success/fail return value assuming that
			 * if the value has been updated since we last read it,
			 * "someone" has a newer timestamp than us and ours is
			 * now invalid. */
			OSCompareAndSwap64(saved_data, abstime, (uint64_t *)cp);
		}
	}
#else
#pragma unused (abstime)
#endif
}

void
commpage_update_mach_continuous_time(uint64_t sleeptime)
{
	char *cp;
	cp = commPagePtr32;
	if (cp) {
		cp += (_COMM_PAGE_CONT_TIMEBASE - _COMM_PAGE32_START_ADDRESS);
		*(uint64_t *)cp = sleeptime;
	}

	cp = commPagePtr64;
	if (cp) {
		cp += (_COMM_PAGE_CONT_TIMEBASE - _COMM_PAGE32_START_ADDRESS);
		*(uint64_t *)cp = sleeptime;
	}
}

void
commpage_update_boottime(uint64_t boottime)
{
	char *cp;
	cp = commPagePtr32;
	if (cp) {
		cp += (_COMM_PAGE_BOOTTIME_USEC - _COMM_PAGE32_START_ADDRESS);
		*(uint64_t *)cp = boottime;
	}

	cp = commPagePtr64;
	if (cp) {
		cp += (_COMM_PAGE_BOOTTIME_USEC - _COMM_PAGE32_START_ADDRESS);
		*(uint64_t *)cp = boottime;
	}
}


extern user32_addr_t commpage_text32_location;
extern user64_addr_t commpage_text64_location;

/* Check to see if a given address is in the Preemption Free Zone (PFZ) */

uint32_t
commpage_is_in_pfz32(uint32_t addr32)
{
	if ( (addr32 >= (commpage_text32_location + _COMM_TEXT_PFZ_START_OFFSET))
	    && (addr32 < (commpage_text32_location + _COMM_TEXT_PFZ_END_OFFSET))) {
		return 1;
	}
	else
		return 0;
}

uint32_t
commpage_is_in_pfz64(addr64_t addr64)
{
	if ( (addr64 >= (commpage_text64_location + _COMM_TEXT_PFZ_START_OFFSET))
	    && (addr64 < (commpage_text64_location + _COMM_TEXT_PFZ_END_OFFSET))) {
		return 1;
	}
	else
		return 0;
}