/*
 * Copyright (c) 2003-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Here's what to do if you want to add a new routine to the comm page:
 *
 * 1. Add a definition for its address in osfmk/i386/cpu_capabilities.h,
 *    being careful to reserve room for future expansion.
 *
 * 2. Write one or more versions of the routine, each with its own
 *    commpage_descriptor. The tricky part is getting the "special",
 *    "musthave", and "canthave" fields right, so that exactly one
 *    version of the routine is selected for every machine.
 *    The source files should be in osfmk/i386/commpage/.
 *    (A hypothetical descriptor sketch follows this comment.)
 *
 * 3. Add a ptr to your new commpage_descriptor(s) in the "routines"
 *    array in osfmk/i386/commpage/commpage_asm.s. There are two
 *    arrays, one for the 32-bit and one for the 64-bit commpage.
 *
 * 4. Write the code in Libc to use the new routine.
 */
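
/* Illustration only: a minimal sketch of the descriptor pair that step 2
 * above asks for, assuming a hypothetical routine slot _COMM_PAGE_BCOPY and
 * code symbols bcopy_sse3_code/bcopy_scalar_code (none of these names are
 * real). The field names match their uses in commpage_stuff_routine() below:
 * a descriptor is selected when every "musthave" bit is set in
 * _cpu_capabilities and no "canthave" bit is. Pairing the masks this way
 * guarantees that exactly one version matches on every machine:
 *
 *    static commpage_descriptor bcopy_sse3_desc = {
 *        .code_address     = bcopy_sse3_code,
 *        .code_length      = sizeof(bcopy_sse3_code),
 *        .commpage_address = _COMM_PAGE_BCOPY,
 *        .musthave         = kHasSSE3,   // selected only on SSE3 machines
 *        .canthave         = 0,
 *    };
 *    static commpage_descriptor bcopy_scalar_desc = {
 *        .code_address     = bcopy_scalar_code,
 *        .code_length      = sizeof(bcopy_scalar_code),
 *        .commpage_address = _COMM_PAGE_BCOPY,
 *        .musthave         = 0,
 *        .canthave         = kHasSSE3,   // fallback when SSE3 is absent
 *    };
 */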

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <i386/cpuid.h>
#include <i386/tsc.h>
#include <i386/rtclock_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/misc_protos.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <ipc/ipc_port.h>

#include <kern/page_decrypt.h>
#include <kern/processor.h>

/* the lists of commpage routines are in commpage_asm.s */
extern commpage_descriptor* commpage_32_routines[];
extern commpage_descriptor* commpage_64_routines[];

extern vm_map_t commpage32_map;     // the shared submap, set up in vm init
extern vm_map_t commpage64_map;     // the shared submap, set up in vm init

char *commPagePtr32 = NULL;         // virtual addr in kernel map of 32-bit commpage
char *commPagePtr64 = NULL;         // ...and of 64-bit commpage
uint32_t _cpu_capabilities = 0;     // define the capability vector

int noVMX = 0;  /* if true, do not set kHasAltivec in ppc _cpu_capabilities */

typedef uint32_t commpage_address_t;

static commpage_address_t next;         // next available address in comm page
static commpage_address_t cur_routine;  // comm page address of "current" routine
static boolean_t matched;               // true if we've found a match for "current" routine

static char *commPagePtr;               // virtual addr in kernel map of commpage we are working on
static commpage_address_t commPageBaseOffset; // subtract from 32-bit runtime address to get offset in virtual commpage in kernel map

static commpage_time_data *time_data32 = NULL;
static commpage_time_data *time_data64 = NULL;

decl_simple_lock_data(static,commpage_active_cpus_lock);

/* Allocate the commpage and add to the shared submap created by vm:
 * 1. allocate a page in the kernel map (RW)
 * 2. wire it down
 * 3. make a memory entry out of it
 * 4. map that entry into the shared comm region map (R-only)
 */

static void*
commpage_allocate(
    vm_map_t submap,        // commpage32_map or commpage64_map
    size_t area_used )      // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
{
    vm_offset_t kernel_addr = 0;    // address of commpage in kernel map
    vm_offset_t zero = 0;
    vm_size_t size = area_used;     // size actually populated
    vm_map_entry_t entry;
    ipc_port_t handle;

    if (submap == NULL)
        panic("commpage submap is null");

    if (vm_map(kernel_map,&kernel_addr,area_used,0,VM_FLAGS_ANYWHERE,NULL,0,FALSE,VM_PROT_ALL,VM_PROT_ALL,VM_INHERIT_NONE))
        panic("cannot allocate commpage");

    if (vm_map_wire(kernel_map,kernel_addr,kernel_addr+area_used,VM_PROT_DEFAULT,FALSE))
        panic("cannot wire commpage");

    /*
     * Now that the object is created and wired into the kernel map, mark it so that no delay
     * copy-on-write will ever be performed on it as a result of mapping it into user-space.
     * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and
     * that would be a real disaster.
     *
     * JMM - What we really need is a way to create it like this in the first place.
     */
    if (!vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr), &entry) || entry->is_sub_map)
        panic("cannot find commpage entry");
    entry->object.vm_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

    if (mach_make_memory_entry( kernel_map, // target map
            &size,          // size
            kernel_addr,    // offset (address in kernel map)
            VM_PROT_ALL,    // map it RWX
            &handle,        // this is the object handle we get
            NULL ))         // parent_entry (what is this?)
        panic("cannot make entry for commpage");

    if (vm_map_64( submap,                  // target map (shared submap)
            &zero,                          // address (map into 1st page in submap)
            area_used,                      // size
            0,                              // mask
            VM_FLAGS_FIXED,                 // flags (it must be 1st page in submap)
            handle,                         // port is the memory entry we just made
            0,                              // offset (map 1st page in memory entry)
            FALSE,                          // copy
            VM_PROT_READ|VM_PROT_EXECUTE,   // cur_protection (R-only in user map)
            VM_PROT_READ|VM_PROT_EXECUTE,   // max_protection
            VM_INHERIT_SHARE ))             // inheritance
        panic("cannot map commpage");

    ipc_port_release(handle);

    // Initialize the text section of the commpage with INT3,
    // so that stray jumps into unpopulated areas trap immediately.
    char *commpage_ptr = (char*)(intptr_t)kernel_addr;
    vm_size_t i;
    for( i = _COMM_PAGE_TEXT_START - _COMM_PAGE_START_ADDRESS; i < size; i++ )
        commpage_ptr[i] = 0xCC;     // 0xCC is the x86 opcode for INT3

    return (void*)(intptr_t)kernel_addr;    // return address in kernel map
}

/* Get address (in kernel map) of a commpage field. */

static void*
commpage_addr_of(
    commpage_address_t addr_at_runtime )
{
    return (void*) ((uintptr_t)commPagePtr + (addr_at_runtime - commPageBaseOffset));
}

/* Determine number of CPUs on this system. We cannot rely on
 * machine_info.max_cpus this early in the boot.
 */
static int
commpage_cpus( void )
{
    int cpus;

    cpus = ml_get_max_cpus();   // NB: this call can block

    if (cpus == 0)
        panic("commpage cpus==0");
    if (cpus > 0xFF)            /* clamp to fit the 8-bit commpage field */
        cpus = 0xFF;

    return cpus;
}

/* Initialize kernel version of _cpu_capabilities vector (used by kexts). */

static void
commpage_init_cpu_capabilities( void )
{
    uint32_t bits;
    int cpus;
    ml_cpu_info_t cpu_info;

    bits = 0;
    ml_cpu_get_info(&cpu_info);

    /* The vector_unit level is cumulative: each level implies all the
     * features of the levels below it, hence the fall-throughs.
     */
    switch (cpu_info.vector_unit) {
    case 9:
        bits |= kHasAVX1_0;
        /* fall thru */
    case 8:
        bits |= kHasSSE4_2;
        /* fall thru */
    case 7:
        bits |= kHasSSE4_1;
        /* fall thru */
    case 6:
        bits |= kHasSupplementalSSE3;
        /* fall thru */
    case 5:
        bits |= kHasSSE3;
        /* fall thru */
    case 4:
        bits |= kHasSSE2;
        /* fall thru */
    case 3:
        bits |= kHasSSE;
        /* fall thru */
    case 2:
        bits |= kHasMMX;
    default:
        break;
    }
    switch (cpu_info.cache_line_size) {
    case 128:
        bits |= kCache128;
        break;
    case 64:
        bits |= kCache64;
        break;
    case 32:
        bits |= kCache32;
        break;
    default:
        break;
    }
    cpus = commpage_cpus();             // how many CPUs do we have

    if (cpus == 1)
        bits |= kUP;

    bits |= (cpus << kNumCPUsShift);

    bits |= kFastThreadLocalStorage;    // we use %gs for TLS

    if (cpu_mode_is64bit())             // k64Bit means processor is 64-bit capable
        bits |= k64Bit;

    if (tscFreq <= SLOW_TSC_THRESHOLD)  /* is TSC too slow for _commpage_nanotime? */
        bits |= kSlow;

    if (cpuid_features() & CPUID_FEATURE_AES)
        bits |= kHasAES;

    _cpu_capabilities = bits;           // set kernel version for use by drivers etc
}

int
_get_cpu_capabilities(void)
{
    return _cpu_capabilities;
}

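/* Illustration only: the intended consumption pattern for the kernel-side
 * capability vector, e.g. from a kext. kHasSSE3 is a real capability bit;
 * the two dispatch functions are hypothetical:
 *
 *    if (_get_cpu_capabilities() & kHasSSE3)
 *        copy_with_sse3(dst, src, len);
 *    else
 *        copy_scalar(dst, src, len);
 */
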
/* Copy data into commpage. */

static void
commpage_stuff(
    commpage_address_t address,
    const void *source,
    int length )
{
    void *dest = commpage_addr_of(address);

    if (address < next)
        panic("commpage overlap at address 0x%p, 0x%x < 0x%x", dest, address, next);

    bcopy(source,dest,length);

    next = address + length;
}

/* Copy a routine into the comm page if it matches the running machine.
 */
static void
commpage_stuff_routine(
    commpage_descriptor *rd )
{
    uint32_t must,cant;

    if (rd->commpage_address != cur_routine) {
        if ((cur_routine!=0) && (matched==0))
            panic("commpage no match for last, next address %08x", rd->commpage_address);
        cur_routine = rd->commpage_address;
        matched = 0;
    }

    must = _cpu_capabilities & rd->musthave;
    cant = _cpu_capabilities & rd->canthave;

    if ((must == rd->musthave) && (cant == 0)) {
        if (matched)
            panic("commpage multiple matches for address %08x", rd->commpage_address);
        matched = 1;

        commpage_stuff(rd->commpage_address,rd->code_address,rd->code_length);
    }
}

/* Fill in the 32- or 64-bit commpage. Called once for each.
 */

static void
commpage_populate_one(
    vm_map_t submap,                // commpage32_map or commpage64_map
    char **kernAddressPtr,          // &commPagePtr32 or &commPagePtr64
    size_t area_used,               // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
    commpage_address_t base_offset, // will become commPageBaseOffset
    commpage_descriptor** commpage_routines, // list of routine ptrs for this commpage
    commpage_time_data** time_data, // &time_data32 or &time_data64
    const char* signature )         // "commpage 32-bit" or "commpage 64-bit"
{
    uint8_t c1;
    short c2;
    int c4;
    uint64_t c8;
    uint32_t cfamily;
    commpage_descriptor **rd;
    short version = _COMM_PAGE_THIS_VERSION;

    next = 0;
    cur_routine = 0;
    commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used );
    *kernAddressPtr = commPagePtr;  // save address either in commPagePtr32 or 64
    commPageBaseOffset = base_offset;

    *time_data = commpage_addr_of( _COMM_PAGE_TIME_DATA_START );

    /* Stuff in the constants. We move things into the comm page in strictly
     * ascending order, so we can check for overlap and panic if so.
     */
    commpage_stuff(_COMM_PAGE_SIGNATURE,signature,(int)strlen(signature));
    commpage_stuff(_COMM_PAGE_VERSION,&version,sizeof(short));
    commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(int));

    c2 = 32;    // default cache line size
    if (_cpu_capabilities & kCache64)
        c2 = 64;
    else if (_cpu_capabilities & kCache128)
        c2 = 128;
    commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2);

    c4 = MP_SPIN_TRIES;
    commpage_stuff(_COMM_PAGE_SPIN_COUNT,&c4,4);

    /* machine_info valid after ml_get_max_cpus() */
    c1 = machine_info.physical_cpu_max;
    commpage_stuff(_COMM_PAGE_PHYSICAL_CPUS,&c1,1);
    c1 = machine_info.logical_cpu_max;
    commpage_stuff(_COMM_PAGE_LOGICAL_CPUS,&c1,1);

    c8 = ml_cpu_cache_size(0);
    commpage_stuff(_COMM_PAGE_MEMORY_SIZE, &c8, 8);

    cfamily = cpuid_info()->cpuid_cpufamily;
    commpage_stuff(_COMM_PAGE_CPUFAMILY, &cfamily, 4);

    for( rd = commpage_routines; *rd != NULL ; rd++ )
        commpage_stuff_routine(*rd);

    if (!matched)
        panic("commpage no match on last routine");

    if (next > _COMM_PAGE_END)
        panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%p", next, commPagePtr);
}

/* Fill in commpages: called once, during kernel initialization, from the
 * startup thread before user-mode code is running.
 *
 * See the top of this file for a list of what you have to do to add
 * a new routine to the commpage.
 */

void
commpage_populate( void )
{
    commpage_init_cpu_capabilities();

    commpage_populate_one( commpage32_map,
                &commPagePtr32,
                _COMM_PAGE32_AREA_USED,
                _COMM_PAGE32_BASE_ADDRESS,
                commpage_32_routines,
                &time_data32,
                "commpage 32-bit");
#ifndef __LP64__
    pmap_commpage32_init((vm_offset_t) commPagePtr32, _COMM_PAGE32_BASE_ADDRESS,
                _COMM_PAGE32_AREA_USED/INTEL_PGBYTES);
#endif
    time_data64 = time_data32;  /* if no 64-bit commpage, point to 32-bit */

    if (_cpu_capabilities & k64Bit) {
        commpage_populate_one( commpage64_map,
                    &commPagePtr64,
                    _COMM_PAGE64_AREA_USED,
                    _COMM_PAGE32_START_ADDRESS, /* commpage addresses are relative to 32-bit commpage placement */
                    commpage_64_routines,
                    &time_data64,
                    "commpage 64-bit");
#ifndef __LP64__
        pmap_commpage64_init((vm_offset_t) commPagePtr64, _COMM_PAGE64_BASE_ADDRESS,
                    _COMM_PAGE64_AREA_USED/INTEL_PGBYTES);
#endif
    }

    simple_lock_init(&commpage_active_cpus_lock, 0);

    commpage_update_active_cpus();
    rtc_nanotime_init_commpage();
}

/* Update commpage nanotime information. Note that we interleave
 * setting the 32- and 64-bit commpages, in order to keep nanotime more
 * nearly in sync between the two environments.
 *
 * This routine must be serialized by some external means, i.e. a lock.
 */

void
commpage_set_nanotime(
    uint64_t tsc_base,
    uint64_t ns_base,
    uint32_t scale,
    uint32_t shift )
{
    commpage_time_data *p32 = time_data32;
    commpage_time_data *p64 = time_data64;
    static uint32_t generation = 0;
    uint32_t next_gen;

    if (p32 == NULL)        /* have commpages been allocated yet? */
        return;

    if ( generation != p32->nt_generation )
        panic("nanotime trouble 1");    /* possibly not serialized */
    if ( ns_base < p32->nt_ns_base )
        panic("nanotime trouble 2");
    if ((shift != 32) && ((_cpu_capabilities & kSlow)==0) )
        panic("nanotime trouble 3");

    next_gen = ++generation;
    if (next_gen == 0)      /* 0 means "invalid", so skip it */
        next_gen = ++generation;

    p32->nt_generation = 0; /* mark invalid, so commpage won't try to use it */
    p64->nt_generation = 0;

    p32->nt_tsc_base = tsc_base;
    p64->nt_tsc_base = tsc_base;

    p32->nt_ns_base = ns_base;
    p64->nt_ns_base = ns_base;

    p32->nt_scale = scale;
    p64->nt_scale = scale;

    p32->nt_shift = shift;
    p64->nt_shift = shift;

    p32->nt_generation = next_gen;  /* mark data as valid */
    p64->nt_generation = next_gen;
}

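/* Illustration only: nt_generation implements a seqlock-style protocol.
 * A user-mode reader of this data is expected to follow a pattern like the
 * hypothetical sketch below; a generation of 0 means the data is invalid
 * (mid-update or disabled), so the reader must fall back to the kernel:
 *
 *    uint32_t gen;
 *    do {
 *        gen = p->nt_generation;
 *        if (gen == 0)
 *            return through_the_kernel();   // hypothetical slow path
 *        read nt_tsc_base, nt_scale, nt_shift, nt_ns_base ...
 *    } while (gen != p->nt_generation);     // retry if a writer intervened
 */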

/* Disable commpage gettimeofday(), forcing commpage to call through to the kernel. */

void
commpage_disable_timestamp( void )
{
    time_data32->gtod_generation = 0;
    time_data64->gtod_generation = 0;
}

/* Update commpage gettimeofday() information. As with nanotime(), we interleave
 * updates to the 32- and 64-bit commpages, in order to keep time more nearly in sync
 * between the two environments.
 *
 * This routine must be serialized by some external means, i.e. a lock.
 */

void
commpage_set_timestamp(
    uint64_t abstime,
    uint64_t secs )
{
    commpage_time_data *p32 = time_data32;
    commpage_time_data *p64 = time_data64;
    static uint32_t generation = 0;
    uint32_t next_gen;

    next_gen = ++generation;
    if (next_gen == 0)      /* 0 means "invalid", so skip it */
        next_gen = ++generation;

    p32->gtod_generation = 0;   /* mark invalid, so commpage won't try to use it */
    p64->gtod_generation = 0;

    p32->gtod_ns_base = abstime;
    p64->gtod_ns_base = abstime;

    p32->gtod_sec_base = secs;
    p64->gtod_sec_base = secs;

    p32->gtod_generation = next_gen;    /* mark data as valid */
    p64->gtod_generation = next_gen;
}

/* Update _COMM_PAGE_MEMORY_PRESSURE. Called periodically from vm's compute_memory_pressure() */

void
commpage_set_memory_pressure(
    unsigned int pressure )
{
    char *cp;
    uint32_t *ip;

    cp = commPagePtr32;
    if ( cp ) {
        cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_BASE_ADDRESS);
        ip = (uint32_t*) cp;
        *ip = (uint32_t) pressure;
    }

    cp = commPagePtr64;
    if ( cp ) {
        cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_START_ADDRESS);
        ip = (uint32_t*) cp;
        *ip = (uint32_t) pressure;
    }
}

/* Update _COMM_PAGE_SPIN_COUNT. We might want to reduce it when running on a battery, etc. */

void
commpage_set_spin_count(
    unsigned int count )
{
    char *cp;
    uint32_t *ip;

    if (count == 0)     /* we test for 0 after decrement, not before */
        count = 1;

    cp = commPagePtr32;
    if ( cp ) {
        cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_BASE_ADDRESS);
        ip = (uint32_t*) cp;
        *ip = (uint32_t) count;
    }

    cp = commPagePtr64;
    if ( cp ) {
        cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_START_ADDRESS);
        ip = (uint32_t*) cp;
        *ip = (uint32_t) count;
    }
}

/* Updated every time a logical CPU goes offline/online */
void
commpage_update_active_cpus(void)
{
    char *cp;
    volatile uint8_t *ip;

    /* At least the 32-bit commpage must be initialized */
    if (!commPagePtr32)
        return;

    simple_lock(&commpage_active_cpus_lock);

    cp = commPagePtr32;
    cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_BASE_ADDRESS);
    ip = (volatile uint8_t*) cp;
    *ip = (uint8_t) processor_avail_count;

    cp = commPagePtr64;
    if ( cp ) {
        cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_START_ADDRESS);
        ip = (volatile uint8_t*) cp;
        *ip = (uint8_t) processor_avail_count;
    }

    simple_unlock(&commpage_active_cpus_lock);
}

/* Check to see if a given address is in the Preemption Free Zone (PFZ) */

uint32_t
commpage_is_in_pfz32(uint32_t addr32)
{
    if ( (addr32 >= _COMM_PAGE_PFZ_START) && (addr32 < _COMM_PAGE_PFZ_END)) {
        return 1;
    }
    else
        return 0;
}

uint32_t
commpage_is_in_pfz64(addr64_t addr64)
{
    if ( (addr64 >= _COMM_PAGE_32_TO_64(_COMM_PAGE_PFZ_START))
        && (addr64 < _COMM_PAGE_32_TO_64(_COMM_PAGE_PFZ_END))) {
        return 1;
    }
    else
        return 0;
}