]>
Commit | Line | Data |
---|---|---|
43866e37 | 1 | /* |
6d2010ae | 2 | * Copyright (c) 2003-2010 Apple Inc. All rights reserved. |
43866e37 | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
43866e37 | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
8f6c56a5 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
8f6c56a5 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
43866e37 A |
27 | */ |
28 | ||
55e303ae A |
29 | /* |
30 | * Here's what to do if you want to add a new routine to the comm page: | |
31 | * | |
0c530ab8 | 32 | * 1. Add a definition for it's address in osfmk/i386/cpu_capabilities.h, |
55e303ae A |
33 | * being careful to reserve room for future expansion. |
34 | * | |
35 | * 2. Write one or more versions of the routine, each with it's own | |
36 | * commpage_descriptor. The tricky part is getting the "special", | |
37 | * "musthave", and "canthave" fields right, so that exactly one | |
38 | * version of the routine is selected for every machine. | |
0c530ab8 | 39 | * The source files should be in osfmk/i386/commpage/. |
55e303ae A |
40 | * |
41 | * 3. Add a ptr to your new commpage_descriptor(s) in the "routines" | |
0c530ab8 A |
42 | * array in osfmk/i386/commpage/commpage_asm.s. There are two |
43 | * arrays, one for the 32-bit and one for the 64-bit commpage. | |
55e303ae A |
44 | * |
45 | * 4. Write the code in Libc to use the new routine. | |
46 | */ | |
47 | ||
48 | #include <mach/mach_types.h> | |
49 | #include <mach/machine.h> | |
91447636 | 50 | #include <mach/vm_map.h> |
b0d623f7 | 51 | #include <mach/mach_vm.h> |
7e4a7d39 A |
52 | #include <mach/machine.h> |
53 | #include <i386/cpuid.h> | |
2d21ac55 | 54 | #include <i386/tsc.h> |
6d2010ae | 55 | #include <i386/rtclock_protos.h> |
2d21ac55 | 56 | #include <i386/cpu_data.h> |
b0d623f7 A |
57 | #include <i386/machine_routines.h> |
58 | #include <i386/misc_protos.h> | |
7e4a7d39 | 59 | #include <i386/cpuid.h> |
43866e37 A |
60 | #include <machine/cpu_capabilities.h> |
61 | #include <machine/commpage.h> | |
55e303ae A |
62 | #include <machine/pmap.h> |
63 | #include <vm/vm_kern.h> | |
91447636 | 64 | #include <vm/vm_map.h> |
b0d623f7 | 65 | |
91447636 A |
66 | #include <ipc/ipc_port.h> |
67 | ||
0c530ab8 | 68 | #include <kern/page_decrypt.h> |
6d2010ae | 69 | #include <kern/processor.h> |
4452a7af | 70 | |
0c530ab8 A |
71 | /* the lists of commpage routines are in commpage_asm.s */ |
72 | extern commpage_descriptor* commpage_32_routines[]; | |
73 | extern commpage_descriptor* commpage_64_routines[]; | |
4452a7af | 74 | |
2d21ac55 A |
75 | extern vm_map_t commpage32_map; // the shared submap, set up in vm init |
76 | extern vm_map_t commpage64_map; // the shared submap, set up in vm init | |
316670eb A |
77 | extern vm_map_t commpage_text32_map; // the shared submap, set up in vm init |
78 | extern vm_map_t commpage_text64_map; // the shared submap, set up in vm init | |
79 | ||
4452a7af | 80 | |
0c530ab8 A |
81 | char *commPagePtr32 = NULL; // virtual addr in kernel map of 32-bit commpage |
82 | char *commPagePtr64 = NULL; // ...and of 64-bit commpage | |
bd504ef0 A |
83 | char *commPageTextPtr32 = NULL; // virtual addr in kernel map of 32-bit commpage |
84 | char *commPageTextPtr64 = NULL; // ...and of 64-bit commpage | |
6601e61a | 85 | |
bd504ef0 | 86 | uint64_t _cpu_capabilities = 0; // define the capability vector |
0c530ab8 | 87 | |
b0d623f7 A |
88 | typedef uint32_t commpage_address_t; |
89 | ||
bd504ef0 | 90 | static commpage_address_t next; // next available address in comm page |
0c530ab8 A |
91 | |
92 | static char *commPagePtr; // virtual addr in kernel map of commpage we are working on | |
b0d623f7 | 93 | static commpage_address_t commPageBaseOffset; // subtract from 32-bit runtime address to get offset in virtual commpage in kernel map |
55e303ae | 94 | |
2d21ac55 A |
95 | static commpage_time_data *time_data32 = NULL; |
96 | static commpage_time_data *time_data64 = NULL; | |
97 | ||
6d2010ae A |
98 | decl_simple_lock_data(static,commpage_active_cpus_lock); |
99 | ||
55e303ae A |
100 | /* Allocate the commpage and add to the shared submap created by vm: |
101 | * 1. allocate a page in the kernel map (RW) | |
102 | * 2. wire it down | |
103 | * 3. make a memory entry out of it | |
104 | * 4. map that entry into the shared comm region map (R-only) | |
105 | */ | |
106 | ||
107 | static void* | |
0c530ab8 | 108 | commpage_allocate( |
2d21ac55 | 109 | vm_map_t submap, // commpage32_map or commpage_map64 |
316670eb A |
110 | size_t area_used, // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED |
111 | vm_prot_t uperm) | |
55e303ae | 112 | { |
2d21ac55 | 113 | vm_offset_t kernel_addr = 0; // address of commpage in kernel map |
0c530ab8 A |
114 | vm_offset_t zero = 0; |
115 | vm_size_t size = area_used; // size actually populated | |
116 | vm_map_entry_t entry; | |
117 | ipc_port_t handle; | |
316670eb | 118 | kern_return_t kr; |
0c530ab8 A |
119 | |
120 | if (submap == NULL) | |
121 | panic("commpage submap is null"); | |
122 | ||
316670eb A |
123 | if ((kr = vm_map(kernel_map,&kernel_addr,area_used,0,VM_FLAGS_ANYWHERE,NULL,0,FALSE,VM_PROT_ALL,VM_PROT_ALL,VM_INHERIT_NONE))) |
124 | panic("cannot allocate commpage %d", kr); | |
0c530ab8 | 125 | |
316670eb A |
126 | if ((kr = vm_map_wire(kernel_map,kernel_addr,kernel_addr+area_used,VM_PROT_DEFAULT,FALSE))) |
127 | panic("cannot wire commpage: %d", kr); | |
0c530ab8 A |
128 | |
129 | /* | |
130 | * Now that the object is created and wired into the kernel map, mark it so that no delay | |
131 | * copy-on-write will ever be performed on it as a result of mapping it into user-space. | |
132 | * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and | |
133 | * that would be a real disaster. | |
134 | * | |
135 | * JMM - What we really need is a way to create it like this in the first place. | |
136 | */ | |
39236c6e | 137 | if (!(kr = vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr, VM_MAP_PAGE_MASK(kernel_map)), &entry) || entry->is_sub_map)) |
316670eb | 138 | panic("cannot find commpage entry %d", kr); |
0c530ab8 A |
139 | entry->object.vm_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
140 | ||
316670eb | 141 | if ((kr = mach_make_memory_entry( kernel_map, // target map |
0c530ab8 A |
142 | &size, // size |
143 | kernel_addr, // offset (address in kernel map) | |
316670eb | 144 | uperm, // protections as specified |
0c530ab8 | 145 | &handle, // this is the object handle we get |
316670eb A |
146 | NULL ))) // parent_entry (what is this?) |
147 | panic("cannot make entry for commpage %d", kr); | |
0c530ab8 | 148 | |
316670eb | 149 | if ((kr = vm_map_64( submap, // target map (shared submap) |
0c530ab8 A |
150 | &zero, // address (map into 1st page in submap) |
151 | area_used, // size | |
152 | 0, // mask | |
153 | VM_FLAGS_FIXED, // flags (it must be 1st page in submap) | |
154 | handle, // port is the memory entry we just made | |
155 | 0, // offset (map 1st page in memory entry) | |
156 | FALSE, // copy | |
316670eb A |
157 | uperm, // cur_protection (R-only in user map) |
158 | uperm, // max_protection | |
159 | VM_INHERIT_SHARE ))) // inheritance | |
160 | panic("cannot map commpage %d", kr); | |
0c530ab8 A |
161 | |
162 | ipc_port_release(handle); | |
316670eb A |
163 | /* Make the kernel mapping non-executable. This cannot be done |
164 | * at the time of map entry creation as mach_make_memory_entry | |
165 | * cannot handle disjoint permissions at this time. | |
166 | */ | |
167 | kr = vm_protect(kernel_map, kernel_addr, area_used, FALSE, VM_PROT_READ | VM_PROT_WRITE); | |
168 | assert (kr == KERN_SUCCESS); | |
0c530ab8 | 169 | |
b0d623f7 | 170 | return (void*)(intptr_t)kernel_addr; // return address in kernel map |
55e303ae A |
171 | } |
172 | ||
173 | /* Get address (in kernel map) of a commpage field. */ | |
174 | ||
91447636 | 175 | static void* |
55e303ae | 176 | commpage_addr_of( |
b0d623f7 | 177 | commpage_address_t addr_at_runtime ) |
55e303ae | 178 | { |
b0d623f7 | 179 | return (void*) ((uintptr_t)commPagePtr + (addr_at_runtime - commPageBaseOffset)); |
55e303ae A |
180 | } |
181 | ||
182 | /* Determine number of CPUs on this system. We cannot rely on | |
183 | * machine_info.max_cpus this early in the boot. | |
184 | */ | |
/* Determine number of CPUs on this system.  We cannot rely on
 * machine_info.max_cpus this early in the boot.
 * The result is clamped to 255 so it fits in the commpage's byte field.
 */
static int
commpage_cpus( void )
{
	int	ncpu = ml_get_max_cpus();	// NB: this call can block

	if (ncpu == 0)
		panic("commpage cpus==0");

	return (ncpu > 0xFF) ? 0xFF : ncpu;
}
43866e37 | 199 | |
55e303ae | 200 | /* Initialize kernel version of _cpu_capabilities vector (used by KEXTs.) */ |
43866e37 | 201 | |
55e303ae A |
/* Initialize the kernel's copy of the _cpu_capabilities vector (used by
 * KEXTs and later stuffed into both commpages).  Bits are accumulated
 * from the vector unit level, cache line size, CPU count, and CPUID
 * feature flags.  Called once at boot, before the commpages are built.
 */
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	/* Each vector_unit level implies all the levels below it, hence the
	 * deliberate fall-throughs.
	 */
	switch (cpu_info.vector_unit) {
		case 9:
			bits |= kHasAVX1_0;
			/* fall thru */
		case 8:
			bits |= kHasSSE4_2;
			/* fall thru */
		case 7:
			bits |= kHasSSE4_1;
			/* fall thru */
		case 6:
			bits |= kHasSupplementalSSE3;
			/* fall thru */
		case 5:
			bits |= kHasSSE3;
			/* fall thru */
		case 4:
			bits |= kHasSSE2;
			/* fall thru */
		case 3:
			bits |= kHasSSE;
			/* fall thru */
		case 2:
			bits |= kHasMMX;
		default:
			break;
	}
	/* Exactly one of the cache-size bits is set (or none for an
	 * unrecognized line size).
	 */
	switch (cpu_info.cache_line_size) {
		case 128:
			bits |= kCache128;
			break;
		case 64:
			bits |= kCache64;
			break;
		case 32:
			bits |= kCache32;
			break;
		default:
			break;
	}
	cpus = commpage_cpus();			// how many CPUs do we have

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;	// we use %gs for TLS

	/* Helper: set _bit in _bits iff _condition holds. */
#define setif(_bits, _bit, _condition) \
	if (_condition) _bits |= _bit

	setif(bits, kUP,         cpus == 1);
	setif(bits, k64Bit,      cpu_mode_is64bit());
	setif(bits, kSlow,       tscFreq <= SLOW_TSC_THRESHOLD);

	/* CPUID leaf 1 feature flags */
	setif(bits, kHasAES,     cpuid_features() &
					CPUID_FEATURE_AES);
	setif(bits, kHasF16C,    cpuid_features() &
					CPUID_FEATURE_F16C);
	setif(bits, kHasRDRAND,  cpuid_features() &
					CPUID_FEATURE_RDRAND);
	setif(bits, kHasFMA,     cpuid_features() &
					CPUID_FEATURE_FMA);

	/* CPUID leaf 7 feature flags */
	setif(bits, kHasBMI1,    cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI1);
	setif(bits, kHasBMI2,    cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI2);
	setif(bits, kHasRTM,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_RTM);
	setif(bits, kHasHLE,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_HLE);
	setif(bits, kHasAVX2_0,  cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_AVX2);

	/* Enhanced fast-string support requires both the CPUID leaf-7 bit and
	 * bit 0 of IA32_MISC_ENABLE (fast-string enable) to be set.
	 */
	uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
	setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
				 (cpuid_leaf7_features() &
				  CPUID_LEAF7_FEATURE_ENFSTRG));

	_cpu_capabilities = bits;		// set kernel version for use by drivers etc
}
292 | ||
bd504ef0 | 293 | uint64_t |
2d21ac55 | 294 | _get_cpu_capabilities(void) |
0c530ab8 A |
295 | { |
296 | return _cpu_capabilities; | |
297 | } | |
298 | ||
55e303ae A |
299 | /* Copy data into commpage. */ |
300 | ||
301 | static void | |
302 | commpage_stuff( | |
b0d623f7 | 303 | commpage_address_t address, |
0c530ab8 | 304 | const void *source, |
55e303ae A |
305 | int length ) |
306 | { | |
307 | void *dest = commpage_addr_of(address); | |
308 | ||
b0d623f7 | 309 | if (address < next) |
6d2010ae | 310 | panic("commpage overlap at address 0x%p, 0x%x < 0x%x", dest, address, next); |
55e303ae A |
311 | |
312 | bcopy(source,dest,length); | |
43866e37 | 313 | |
b0d623f7 | 314 | next = address + length; |
55e303ae A |
315 | } |
316 | ||
317 | /* Copy a routine into comm page if it matches running machine. | |
318 | */ | |
319 | static void | |
320 | commpage_stuff_routine( | |
bd504ef0 | 321 | commpage_descriptor *rd ) |
55e303ae | 322 | { |
bd504ef0 | 323 | commpage_stuff(rd->commpage_address,rd->code_address,rd->code_length); |
55e303ae A |
324 | } |
325 | ||
0c530ab8 | 326 | /* Fill in the 32- or 64-bit commpage. Called once for each. |
55e303ae A |
327 | */ |
328 | ||
0c530ab8 A |
/* Fill in one (32- or 64-bit) data commpage: allocate it, record its
 * kernel address and base offset in the module globals, and stuff the
 * constant fields in strictly ascending address order.
 * Called once per commpage from commpage_populate().
 */
static void
commpage_populate_one(
	vm_map_t	submap,		// commpage32_map or compage64_map
	char **		kernAddressPtr,	// &commPagePtr32 or &commPagePtr64
	size_t		area_used,	// _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
	commpage_address_t base_offset,	// will become commPageBaseOffset
	commpage_time_data** time_data,	// &time_data32 or &time_data64
	const char*	signature,	// "commpage 32-bit" or "commpage 64-bit"
	vm_prot_t	uperm)
{
	uint8_t	c1;		// scratch for 1-byte fields
	uint16_t c2;		// scratch for 2-byte fields
	int	c4;		// scratch for 4-byte fields
	uint64_t c8;		// scratch for 8-byte fields
	uint32_t cfamily;
	short	version = _COMM_PAGE_THIS_VERSION;

	next = 0;
	commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used, uperm );
	*kernAddressPtr = commPagePtr;				// save address either in commPagePtr32 or 64
	commPageBaseOffset = base_offset;

	*time_data = commpage_addr_of( _COMM_PAGE_TIME_DATA_START );

	/* Stuff in the constants.  We move things into the comm page in strictly
	 * ascending order, so we can check for overlap and panic if so.
	 * Note: the 32-bit cpu_capabilities vector is retained in addition to
	 * the expanded 64-bit vector.
	 */
	commpage_stuff(_COMM_PAGE_SIGNATURE,signature,(int)MIN(_COMM_PAGE_SIGNATURELEN, strlen(signature)));
	commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES64,&_cpu_capabilities,sizeof(_cpu_capabilities));
	commpage_stuff(_COMM_PAGE_VERSION,&version,sizeof(short));
	/* legacy 32-bit vector: low 32 bits of the 64-bit one */
	commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(uint32_t));

	c2 = 32;  // default
	if (_cpu_capabilities & kCache64)
		c2 = 64;
	else if (_cpu_capabilities & kCache128)
		c2 = 128;
	commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2);

	c4 = MP_SPIN_TRIES;
	commpage_stuff(_COMM_PAGE_SPIN_COUNT,&c4,4);

	/* machine_info valid after ml_get_max_cpus() */
	c1 = machine_info.physical_cpu_max;
	commpage_stuff(_COMM_PAGE_PHYSICAL_CPUS,&c1,1);
	c1 = machine_info.logical_cpu_max;
	commpage_stuff(_COMM_PAGE_LOGICAL_CPUS,&c1,1);

	c8 = ml_cpu_cache_size(0);
	commpage_stuff(_COMM_PAGE_MEMORY_SIZE, &c8, 8);

	cfamily = cpuid_info()->cpuid_cpufamily;
	commpage_stuff(_COMM_PAGE_CPUFAMILY, &cfamily, 4);

	if (next > _COMM_PAGE_END)
		panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%p", next, commPagePtr);

}
91447636 | 389 | |
0c530ab8 A |
390 | |
391 | /* Fill in commpages: called once, during kernel initialization, from the | |
392 | * startup thread before user-mode code is running. | |
393 | * | |
394 | * See the top of this file for a list of what you have to do to add | |
395 | * a new routine to the commpage. | |
396 | */ | |
91447636 A |
397 | |
void
commpage_populate( void )
{
	/* Capabilities must be computed first: the commpage contents and the
	 * decision to build a 64-bit commpage both depend on them.
	 */
	commpage_init_cpu_capabilities();

	/* The 32-bit commpage is always created. */
	commpage_populate_one(	commpage32_map,
				&commPagePtr32,
				_COMM_PAGE32_AREA_USED,
				_COMM_PAGE32_BASE_ADDRESS,
				&time_data32,
				"commpage 32-bit",
				VM_PROT_READ);
#ifndef __LP64__
	pmap_commpage32_init((vm_offset_t) commPagePtr32, _COMM_PAGE32_BASE_ADDRESS,
			   _COMM_PAGE32_AREA_USED/INTEL_PGBYTES);
#endif
	time_data64 = time_data32;			/* if no 64-bit commpage, point to 32-bit */

	/* Build the 64-bit commpage only on 64-bit capable hardware. */
	if (_cpu_capabilities & k64Bit) {
		commpage_populate_one(	commpage64_map,
					&commPagePtr64,
					_COMM_PAGE64_AREA_USED,
					_COMM_PAGE32_START_ADDRESS, /* commpage address are relative to 32-bit commpage placement */
					&time_data64,
					"commpage 64-bit",
					VM_PROT_READ);
#ifndef __LP64__
		pmap_commpage64_init((vm_offset_t) commPagePtr64, _COMM_PAGE64_BASE_ADDRESS,
				   _COMM_PAGE64_AREA_USED/INTEL_PGBYTES);
#endif
	}

	simple_lock_init(&commpage_active_cpus_lock, 0);

	commpage_update_active_cpus();
	rtc_nanotime_init_commpage();
}
2d21ac55 | 435 | |
316670eb A |
436 | /* Fill in the common routines during kernel initialization. |
437 | * This is called before user-mode code is running. | |
438 | */ | |
/* Fill in the (read/execute) text commpages with the commpage routines
 * during kernel initialization.  This is called before user-mode code is
 * running.  Reuses the module globals 'next', 'commPagePtr' and
 * 'commPageBaseOffset' that commpage_stuff()/commpage_addr_of() consult,
 * so each page must be fully populated before the next is begun.
 */
void commpage_text_populate( void ){
	commpage_descriptor **rd;

	next = 0;
	commPagePtr = (char *) commpage_allocate(commpage_text32_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE);
	commPageTextPtr32 = commPagePtr;

	/* Pre-fill the page with 0xCC (the x86 'int3' breakpoint opcode) so a
	 * stray jump into unstuffed space traps instead of executing garbage.
	 */
	char *cptr = commPagePtr;
	int i=0;
	for(; i< _COMM_PAGE_TEXT_AREA_USED; i++){
		cptr[i]=0xCC;
	}

	commPageBaseOffset = _COMM_PAGE_TEXT_START;
	/* Copy in every 32-bit routine; the list is NULL-terminated. */
	for (rd = commpage_32_routines; *rd != NULL; rd++) {
		commpage_stuff_routine(*rd);
	}

#ifndef __LP64__
	pmap_commpage32_init((vm_offset_t) commPageTextPtr32, _COMM_PAGE_TEXT_START,
			   _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES);
#endif

	if (_cpu_capabilities & k64Bit) {
		next = 0;
		commPagePtr = (char *) commpage_allocate(commpage_text64_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE);
		commPageTextPtr64 = commPagePtr;

		/* Same int3 pre-fill for the 64-bit page. */
		cptr=commPagePtr;
		for(i=0; i<_COMM_PAGE_TEXT_AREA_USED; i++){
			cptr[i]=0xCC;
		}

		for (rd = commpage_64_routines; *rd !=NULL; rd++) {
			commpage_stuff_routine(*rd);
		}

#ifndef __LP64__
		pmap_commpage64_init((vm_offset_t) commPageTextPtr64, _COMM_PAGE_TEXT_START,
				   _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES);
#endif
	}

	if (next > _COMM_PAGE_TEXT_END)
		panic("commpage text overflow: next=0x%08x, commPagePtr=%p", next, commPagePtr);

}
2d21ac55 | 486 | |
bd504ef0 | 487 | /* Update commpage nanotime information. |
2d21ac55 A |
488 | * |
489 | * This routine must be serialized by some external means, ie a lock. | |
490 | */ | |
491 | ||
void
commpage_set_nanotime(
	uint64_t	tsc_base,
	uint64_t	ns_base,
	uint32_t	scale,
	uint32_t	shift )
{
	commpage_time_data	*p32 = time_data32;
	commpage_time_data	*p64 = time_data64;
	static uint32_t	generation = 0;
	uint32_t	next_gen;

	if (p32 == NULL)		/* have commpages been allocated yet? */
		return;

	/* Sanity checks: a generation mismatch means another writer got in
	 * (this routine must be externally serialized), and ns_base must
	 * never move backward.
	 */
	if ( generation != p32->nt_generation )
		panic("nanotime trouble 1");	/* possibly not serialized */
	if ( ns_base < p32->nt_ns_base )
		panic("nanotime trouble 2");
	if ((shift != 0) && ((_cpu_capabilities & kSlow)==0) )
		panic("nanotime trouble 3");

	/* Generation 0 is reserved as the "invalid" marker, so skip it. */
	next_gen = ++generation;
	if (next_gen == 0)
		next_gen = ++generation;

	/* Lock-free update protocol: zero both generations so user code
	 * ignores the data, write the new fields, then publish the new
	 * generation.  Both commpages are interleaved to keep them in sync.
	 */
	p32->nt_generation = 0;		/* mark invalid, so commpage won't try to use it */
	p64->nt_generation = 0;

	p32->nt_tsc_base = tsc_base;
	p64->nt_tsc_base = tsc_base;

	p32->nt_ns_base = ns_base;
	p64->nt_ns_base = ns_base;

	p32->nt_scale = scale;
	p64->nt_scale = scale;

	p32->nt_shift = shift;
	p64->nt_shift = shift;

	p32->nt_generation = next_gen;	/* mark data as valid */
	p64->nt_generation = next_gen;
}
536 | ||
537 | ||
538 | /* Disable commpage gettimeofday(), forcing commpage to call through to the kernel. */ | |
539 | ||
540 | void | |
541 | commpage_disable_timestamp( void ) | |
542 | { | |
543 | time_data32->gtod_generation = 0; | |
544 | time_data64->gtod_generation = 0; | |
545 | } | |
546 | ||
547 | ||
548 | /* Update commpage gettimeofday() information. As with nanotime(), we interleave | |
549 | * updates to the 32- and 64-bit commpage, in order to keep time more nearly in sync | |
550 | * between the two environments. | |
551 | * | |
552 | * This routine must be serializeed by some external means, ie a lock. | |
553 | */ | |
554 | ||
555 | void | |
556 | commpage_set_timestamp( | |
557 | uint64_t abstime, | |
558 | uint64_t secs ) | |
559 | { | |
560 | commpage_time_data *p32 = time_data32; | |
561 | commpage_time_data *p64 = time_data64; | |
562 | static uint32_t generation = 0; | |
563 | uint32_t next_gen; | |
564 | ||
565 | next_gen = ++generation; | |
566 | if (next_gen == 0) | |
567 | next_gen = ++generation; | |
568 | ||
569 | p32->gtod_generation = 0; /* mark invalid, so commpage won't try to use it */ | |
570 | p64->gtod_generation = 0; | |
571 | ||
572 | p32->gtod_ns_base = abstime; | |
573 | p64->gtod_ns_base = abstime; | |
574 | ||
575 | p32->gtod_sec_base = secs; | |
576 | p64->gtod_sec_base = secs; | |
577 | ||
578 | p32->gtod_generation = next_gen; /* mark data as valid */ | |
579 | p64->gtod_generation = next_gen; | |
580 | } | |
b0d623f7 A |
581 | |
582 | ||
583 | /* Update _COMM_PAGE_MEMORY_PRESSURE. Called periodically from vm's compute_memory_pressure() */ | |
584 | ||
585 | void | |
586 | commpage_set_memory_pressure( | |
587 | unsigned int pressure ) | |
588 | { | |
589 | char *cp; | |
590 | uint32_t *ip; | |
591 | ||
592 | cp = commPagePtr32; | |
593 | if ( cp ) { | |
594 | cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_BASE_ADDRESS); | |
bd504ef0 | 595 | ip = (uint32_t*) (void *) cp; |
b0d623f7 A |
596 | *ip = (uint32_t) pressure; |
597 | } | |
598 | ||
599 | cp = commPagePtr64; | |
600 | if ( cp ) { | |
601 | cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_START_ADDRESS); | |
bd504ef0 | 602 | ip = (uint32_t*) (void *) cp; |
b0d623f7 A |
603 | *ip = (uint32_t) pressure; |
604 | } | |
605 | ||
606 | } | |
607 | ||
608 | ||
609 | /* Update _COMM_PAGE_SPIN_COUNT. We might want to reduce when running on a battery, etc. */ | |
610 | ||
611 | void | |
612 | commpage_set_spin_count( | |
613 | unsigned int count ) | |
614 | { | |
615 | char *cp; | |
616 | uint32_t *ip; | |
617 | ||
618 | if (count == 0) /* we test for 0 after decrement, not before */ | |
619 | count = 1; | |
620 | ||
621 | cp = commPagePtr32; | |
622 | if ( cp ) { | |
623 | cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_BASE_ADDRESS); | |
bd504ef0 | 624 | ip = (uint32_t*) (void *) cp; |
b0d623f7 A |
625 | *ip = (uint32_t) count; |
626 | } | |
627 | ||
628 | cp = commPagePtr64; | |
629 | if ( cp ) { | |
630 | cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_START_ADDRESS); | |
bd504ef0 | 631 | ip = (uint32_t*) (void *) cp; |
b0d623f7 A |
632 | *ip = (uint32_t) count; |
633 | } | |
634 | ||
635 | } | |
636 | ||
6d2010ae A |
637 | /* Updated every time a logical CPU goes offline/online */ |
/* Refresh the commpage's active-CPU-count byte from processor_avail_count.
 * Updated every time a logical CPU goes offline/online; serialized by
 * commpage_active_cpus_lock.
 */
void
commpage_update_active_cpus(void)
{
	char		*cp;
	volatile uint8_t	*ip;

	/* At least 32-bit commpage must be initialized */
	if (!commPagePtr32)
		return;

	simple_lock(&commpage_active_cpus_lock);

	cp = commPagePtr32;
	cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_BASE_ADDRESS);
	ip = (volatile uint8_t*) cp;
	*ip = (uint8_t) processor_avail_count;

	/* The 64-bit commpage may not exist (32-bit-only hardware). */
	cp = commPagePtr64;
	if ( cp ) {
		cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_START_ADDRESS);
		ip = (volatile uint8_t*) cp;
		*ip = (uint8_t) processor_avail_count;
	}

	simple_unlock(&commpage_active_cpus_lock);
}
664 | ||
316670eb A |
665 | extern user32_addr_t commpage_text32_location; |
666 | extern user64_addr_t commpage_text64_location; | |
b0d623f7 A |
667 | |
668 | /* Check to see if a given address is in the Preemption Free Zone (PFZ) */ | |
669 | ||
670 | uint32_t | |
671 | commpage_is_in_pfz32(uint32_t addr32) | |
672 | { | |
316670eb A |
673 | if ( (addr32 >= (commpage_text32_location + _COMM_TEXT_PFZ_START_OFFSET)) |
674 | && (addr32 < (commpage_text32_location+_COMM_TEXT_PFZ_END_OFFSET))) { | |
b0d623f7 A |
675 | return 1; |
676 | } | |
677 | else | |
678 | return 0; | |
679 | } | |
680 | ||
681 | uint32_t | |
682 | commpage_is_in_pfz64(addr64_t addr64) | |
683 | { | |
316670eb A |
684 | if ( (addr64 >= (commpage_text64_location + _COMM_TEXT_PFZ_START_OFFSET)) |
685 | && (addr64 < (commpage_text64_location + _COMM_TEXT_PFZ_END_OFFSET))) { | |
b0d623f7 A |
686 | return 1; |
687 | } | |
688 | else | |
689 | return 0; | |
690 | } | |
691 |