/*
 * osfmk/ppc/commpage/commpage.c — from the xnu-1228.12.14 source release.
 */
1/*
2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*
30 * Here's what to do if you want to add a new routine to the comm page:
31 *
 * 1. Add a definition for its address in osfmk/ppc/cpu_capabilities.h,
 *    being careful to reserve room for future expansion.
34 *
 * 2. Write one or more versions of the routine, each with its own
36 * commpage_descriptor. The tricky part is getting the "special",
37 * "musthave", and "canthave" fields right, so that exactly one
38 * version of the routine is selected for every machine.
39 * The source files should be in osfmk/ppc/commpage/.
40 *
41 * 3. Add a ptr to your new commpage_descriptor(s) in the "routines"
42 * static array below. Of course, you'll also have to declare them
43 * "extern".
44 *
45 * 4. Write the code in Libc to use the new routine.
46 */
47
48#include <mach/mach_types.h>
49#include <mach/machine.h>
50#include <mach/vm_map.h>
51#include <ppc/exception.h>
52#include <ppc/machine_routines.h>
53#include <machine/cpu_capabilities.h>
54#include <machine/commpage.h>
55#include <machine/pmap.h>
56#include <vm/vm_kern.h>
57#include <vm/vm_map.h>
58#include <ipc/ipc_port.h>
59
extern vm_map_t	commpage32_map;	// the 32-bit shared submap, set up in vm init
extern vm_map_t	commpage64_map;	// the 64-bit shared submap

char	*commPagePtr32 = NULL;		// virtual address of 32-bit comm page in kernel map
char	*commPagePtr64 = NULL;		// and 64-bit commpage
int	_cpu_capabilities = 0;		// define the capability vector

/* State used only while populating a commpage at boot (single-threaded):
 * reset by commpage_populate_one() and advanced by commpage_stuff() /
 * commpage_stuff_routine().
 */
static	char	*next;			// next available byte in comm page
static	int	cur_routine;		// comm page address of "current" routine
static	int	matched;		// true if we've found a match for "current" routine
static	char	*commPagePtr;		// virtual address in kernel of commpage we are working on
71
72extern commpage_descriptor compare_and_swap32_on32;
73extern commpage_descriptor compare_and_swap32_on64;
74extern commpage_descriptor compare_and_swap64;
75extern commpage_descriptor atomic_enqueue32;
76extern commpage_descriptor atomic_enqueue64;
77extern commpage_descriptor atomic_dequeue32_on32;
78extern commpage_descriptor atomic_dequeue32_on64;
79extern commpage_descriptor atomic_dequeue64;
80extern commpage_descriptor memory_barrier_up;
81extern commpage_descriptor memory_barrier_mp32;
82extern commpage_descriptor memory_barrier_mp64;
83extern commpage_descriptor atomic_add32;
84extern commpage_descriptor atomic_add64;
85extern commpage_descriptor mach_absolute_time_32;
86extern commpage_descriptor mach_absolute_time_64;
87extern commpage_descriptor mach_absolute_time_lp64;
88extern commpage_descriptor spinlock_32_try_mp;
89extern commpage_descriptor spinlock_32_try_up;
90extern commpage_descriptor spinlock_64_try_mp;
91extern commpage_descriptor spinlock_64_try_up;
92extern commpage_descriptor spinlock_32_lock_mp;
93extern commpage_descriptor spinlock_32_lock_up;
94extern commpage_descriptor spinlock_64_lock_mp;
95extern commpage_descriptor spinlock_64_lock_up;
96extern commpage_descriptor spinlock_32_unlock_mp;
97extern commpage_descriptor spinlock_32_unlock_up;
98extern commpage_descriptor spinlock_64_unlock_mp;
99extern commpage_descriptor spinlock_64_unlock_up;
100extern commpage_descriptor pthread_getspecific_sprg3_32;
101extern commpage_descriptor pthread_getspecific_sprg3_64;
102extern commpage_descriptor pthread_getspecific_uftrap;
103extern commpage_descriptor gettimeofday_32;
104extern commpage_descriptor gettimeofday_g5_32;
105extern commpage_descriptor gettimeofday_g5_64;
106extern commpage_descriptor commpage_flush_dcache;
107extern commpage_descriptor commpage_flush_icache;
108extern commpage_descriptor pthread_self_sprg3;
109extern commpage_descriptor pthread_self_uftrap;
110extern commpage_descriptor spinlock_relinquish;
111extern commpage_descriptor bzero_32;
112extern commpage_descriptor bzero_128;
113extern commpage_descriptor bcopy_g3;
114extern commpage_descriptor bcopy_g4;
115extern commpage_descriptor bcopy_970;
116extern commpage_descriptor bcopy_64;
117extern commpage_descriptor compare_and_swap32_on32b;
118extern commpage_descriptor compare_and_swap32_on64b;
119extern commpage_descriptor compare_and_swap64b;
120extern commpage_descriptor memset_64;
121extern commpage_descriptor memset_g3;
122extern commpage_descriptor memset_g4;
123extern commpage_descriptor memset_g5;
124extern commpage_descriptor bigcopy_970;
125
/* The list of all possible commpage routines.  WARNING: the check for overlap
 * assumes that these routines are in strictly ascending order, sorted by address
 * in the commpage.  We panic if not.  Multiple descriptors for the same
 * commpage address (alternate implementations for different CPUs) must be
 * adjacent — commpage_stuff_routine() relies on this to verify that exactly
 * one candidate matches the running machine.
 */
static	commpage_descriptor	*routines[] = {
    &compare_and_swap32_on32,
    &compare_and_swap32_on64,
    &compare_and_swap64,
    &atomic_enqueue32,
    &atomic_enqueue64,
    &atomic_dequeue32_on32,
    &atomic_dequeue32_on64,
    &atomic_dequeue64,
    &memory_barrier_up,
    &memory_barrier_mp32,
    &memory_barrier_mp64,
    &atomic_add32,
    &atomic_add64,
    &mach_absolute_time_32,
    &mach_absolute_time_64,
    &mach_absolute_time_lp64,
    &spinlock_32_try_mp,
    &spinlock_32_try_up,
    &spinlock_64_try_mp,
    &spinlock_64_try_up,
    &spinlock_32_lock_mp,
    &spinlock_32_lock_up,
    &spinlock_64_lock_mp,
    &spinlock_64_lock_up,
    &spinlock_32_unlock_mp,
    &spinlock_32_unlock_up,
    &spinlock_64_unlock_mp,
    &spinlock_64_unlock_up,
    &pthread_getspecific_sprg3_32,
    &pthread_getspecific_sprg3_64,
    &pthread_getspecific_uftrap,
    &gettimeofday_32,
    &gettimeofday_g5_32,
    &gettimeofday_g5_64,
    &commpage_flush_dcache,
    &commpage_flush_icache,
    &pthread_self_sprg3,
    &pthread_self_uftrap,
    &spinlock_relinquish,
    &bzero_32,
    &bzero_128,
    &bcopy_g3,
    &bcopy_g4,
    &bcopy_970,
    &bcopy_64,
    &compare_and_swap32_on32b,
    &compare_and_swap32_on64b,
    &compare_and_swap64b,
    &memset_64,
    &memset_g3,
    &memset_g4,
    &memset_g5,
    &bigcopy_970,
    NULL };				// NULL terminator: end of table
185
186
/* Allocate the commpages and add to one of the shared submaps created by vm.
 * Called once each for the 32 and 64-bit submaps.
 * 	1. allocate pages in the kernel map (RW)
 *	2. wire them down
 *	3. make a memory entry out of them
 * 	4. map that entry into the shared comm region map (R-only)
 *
 * Returns the kernel-map virtual address of the page; panics on any failure
 * (this runs once at boot, so there is no caller that could recover.)
 */
static	void*
commpage_allocate(
	vm_map_t	submap )			// commpage32_map or commpage64_map
{
    vm_offset_t kernel_addr = 0;		// address of commpage in kernel map
    vm_offset_t zero = 0;
    vm_size_t   size = _COMM_PAGE_AREA_USED;	// size actually populated
    vm_map_entry_t	entry;
    ipc_port_t	handle;

    if (submap == NULL)
        panic("commpage submap is null");

    // Get fresh RW pages anywhere in the kernel map.
    if (vm_map(kernel_map,&kernel_addr,_COMM_PAGE_AREA_USED,0,VM_FLAGS_ANYWHERE,NULL,0,FALSE,VM_PROT_ALL,VM_PROT_ALL,VM_INHERIT_NONE))
        panic("cannot allocate commpage");

    // Wire them so the kernel can stuff code/data without faulting.
    if (vm_map_wire(kernel_map,kernel_addr,kernel_addr+_COMM_PAGE_AREA_USED,VM_PROT_DEFAULT,FALSE))
        panic("cannot wire commpage");

    /*
     * Now that the object is created and wired into the kernel map, mark it so that no delay
     * copy-on-write will ever be performed on it as a result of mapping it into user-space.
     * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and
     * that would be a real disaster.
     *
     * JMM - What we really need is a way to create it like this in the first place.
     */
    if (!vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr), &entry) || entry->is_sub_map)
        panic("cannot find commpage entry");
    entry->object.vm_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

    // Wrap the pages in a memory entry so they can be mapped a second time.
    if (mach_make_memory_entry(	kernel_map,		// target map
                                &size,			// size
                                kernel_addr,		// offset (address in kernel map)
                                VM_PROT_ALL,		// map it RWX
                                &handle,		// this is the object handle we get
                                NULL ))			// parent_entry
        panic("cannot make entry for commpage");

    // Map the same pages read/execute-only at offset 0 of the shared submap.
    if (vm_map_64(	submap,				// target map (shared submap)
			&zero,				// address (map into 1st page in submap)
			_COMM_PAGE_AREA_USED,		// size
			0,				// mask
			VM_FLAGS_FIXED,			// flags (it must be 1st page in submap)
			handle,				// port is the memory entry we just made
			0,				// offset (map 1st page in memory entry)
			FALSE,				// copy
			VM_PROT_READ|VM_PROT_EXECUTE,	// cur_protection (R-only in user map)
			VM_PROT_READ|VM_PROT_EXECUTE,	// max_protection
			VM_INHERIT_SHARE ))		// inheritance
        panic("cannot map commpage");

    // The submap mapping holds its own reference now.
    ipc_port_release(handle);

    return (void*) kernel_addr;			// return address in kernel map
}
250
251
252/* Get address (in kernel map) of a commpage field. */
253
254static void*
255commpage_addr_of(
256 int addr_at_runtime )
257{
258 return (void*) (commPagePtr + addr_at_runtime - _COMM_PAGE_BASE_ADDRESS);
259}
260
261
/* Determine number of CPUs on this system.  We cannot rely on
 * machine_info.max_cpus this early in the boot.  The count is clamped
 * to 255 so it fits in the 8-bit field of _cpu_capabilities.
 */
static	int
commpage_cpus( void )
{
	int	count = ml_get_max_cpus();	// NB: this call can block

	if (count == 0)
		panic("commpage cpus==0");

	return (count > 0xFF) ? 0xFF : count;
}
279
280
281/* Initialize kernel version of _cpu_capabilities vector (used by KEXTs.) */
282
283static void
284commpage_init_cpu_capabilities( void )
285{
286 procFeatures *pfp;
287 int cpus;
288 int available;
289
290 pfp = &(PerProcTable[0].ppe_vaddr->pf); // point to features in per-proc
291 available = pfp->Available;
292
293 // If AltiVec is disabled make sure it is not reported as available.
294 if ((available & pfAltivec) == 0) {
295 _cpu_capabilities &= ~kHasAltivec;
296 }
297
298 if (_cpu_capabilities & kDcbaAvailable) { // if this processor has DCBA, time it...
299 _cpu_capabilities |= commpage_time_dcba(); // ...and set kDcbaRecomended if it helps.
300 }
301
302 cpus = commpage_cpus(); // how many CPUs do we have
303 if (cpus == 1) _cpu_capabilities |= kUP;
304 _cpu_capabilities |= (cpus << kNumCPUsShift);
305
306 if (_cpu_capabilities & k64Bit) // 64-bit processors use SPRG3 for TLS
307 _cpu_capabilities |= kFastThreadLocalStorage;
308}
309
310
311/* Copy data into commpage. */
312
313static void
314commpage_stuff(
315 int address,
316 const void *source,
317 int length )
318{
319 char *dest = commpage_addr_of(address);
320
321 if (dest < next)
322 panic("commpage overlap: %p - %p", dest, next);
323
324 bcopy((const char*)source,dest,length);
325
326 next = (dest + length);
327}
328
329
330/* Modify commpage code in-place for this specific platform. */
331
332static void
333commpage_change(
334 uint32_t *ptr,
335 int bytes,
336 uint32_t search_mask,
337 uint32_t search_pattern,
338 uint32_t new_mask,
339 uint32_t new_pattern,
340 int (*check)(uint32_t instruction) )
341{
342 int words = bytes >> 2;
343 uint32_t word;
344
345 while( (--words) >= 0 ) {
346 word = *ptr;
347 if ((word & search_mask)==search_pattern) {
348 if ((check==NULL) || (check(word))) { // check instruction if necessary
349 word &= ~new_mask;
350 word |= new_pattern;
351 *ptr = word;
352 }
353 }
354 ptr++;
355 }
356}
357
358
/* Check to see if exactly one bit is set in a MTCRF instruction's FXM field.
 * Returns 1 iff exactly one bit is set; panics on an all-zero FXM.
 */
static	int
commpage_onebit(
	uint32_t	mtcrf )
{
	int	fxm = (mtcrf >> 12) & 0xFF;	// isolate the FXM field of the MTCRF

	if (fxm == 0)
		panic("commpage bad mtcrf");

	// (x & (x-1)) clears the lowest set bit; zero result means one bit was set.
	return ((fxm & (fxm - 1)) == 0) ? 1 : 0;
}
372
373
/* Check to see if a RLWINM (whose ME is 31) is a SRWI.  Since to shift right n bits
 * you must "RLWINM ra,rs,32-n,n,31", if (SH+MB)==32 then we have a SRWI.
 */
static	int
commpage_srwi(
	uint32_t	rlwinm )
{
	int	shift = (rlwinm >> 11) & 0x1F;		// SH field, ie bits 16-20
	int	mask_begin = (rlwinm >> 6) & 0x1F;	// MB field, ie bits 21-25

	return (shift + mask_begin) == 32;		// SRWI iff (SH+MB)==32
}
386
387
388/* Handle kCommPageDCBA bit: the commpage routine uses DCBA. If the machine we're
389 * running on doesn't benefit from use of that instruction, map them to NOPs
390 * in the commpage.
391 */
392static void
393commpage_handle_dcbas(
394 int address,
395 int length )
396{
397 uint32_t *ptr, search_mask, search, replace_mask, replace;
398
399 if ( (_cpu_capabilities & kDcbaRecommended) == 0 ) {
400 ptr = commpage_addr_of(address);
401
402 search_mask = 0xFC0007FE; // search x-form opcode bits
403 search = 0x7C0005EC; // for a DCBA
404 replace_mask = 0xFFFFFFFF; // replace all bits...
405 replace = 0x60000000; // ...with a NOP
406
407 commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
408 }
409}
410
411
412/* Handle kCommPageSYNC bit: this routine uses SYNC, LWSYNC, or EIEIO. If we're
413 * running on a UP machine, map them to NOPs.
414 */
415static void
416commpage_handle_syncs(
417 int address,
418 int length )
419{
420 uint32_t *ptr, search_mask, search, replace_mask, replace;
421
422 if (_NumCPUs() == 1) {
423 ptr = commpage_addr_of(address);
424
425 search_mask = 0xFC0005FE; // search x-form opcode bits (but ignore bit 0x00000200)
426 search = 0x7C0004AC; // for a SYNC, LWSYNC, or EIEIO
427 replace_mask = 0xFFFFFFFF; // replace all bits...
428 replace = 0x60000000; // ...with a NOP
429
430 commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
431 }
432}
433
434
435/* Handle kCommPageISYNC bit: this routine uses ISYNCs. If we're running on a UP machine,
436 * map them to NOPs.
437 */
438static void
439commpage_handle_isyncs(
440 int address,
441 int length )
442{
443 uint32_t *ptr, search_mask, search, replace_mask, replace;
444
445 if (_NumCPUs() == 1) {
446 ptr = commpage_addr_of(address);
447
448 search_mask = 0xFC0007FE; // search xl-form opcode bits
449 search = 0x4C00012C; // for an ISYNC
450 replace_mask = 0xFFFFFFFF; // replace all bits...
451 replace = 0x60000000; // ...with a NOP
452
453 commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
454 }
455}
456
457
458/* Handle kCommPageMTCRF bit. When this was written (3/03), the assembler did not
459 * recognize the special form of MTCRF instructions, in which exactly one bit is set
460 * in the 8-bit mask field. Bit 11 of the instruction should be set in this case,
461 * since the 970 and probably other 64-bit processors optimize it. Once the assembler
462 * has been updated this code can be removed, though it need not be.
463 */
464static void
465commpage_handle_mtcrfs(
466 int address,
467 int length )
468{
469 uint32_t *ptr, search_mask, search, replace_mask, replace;
470
471 if (_cpu_capabilities & k64Bit) {
472 ptr = commpage_addr_of(address);
473
474 search_mask = 0xFC0007FE; // search x-form opcode bits
475 search = 0x7C000120; // for a MTCRF
476 replace_mask = 0x00100000; // replace bit 11...
477 replace = 0x00100000; // ...with a 1-bit
478
479 commpage_change(ptr,length,search_mask,search,replace_mask,replace,commpage_onebit);
480 }
481}
482
483
484/* Port 32-bit code to 64-bit for use in the 64-bit commpage. This sounds fancier than
485 * it is. We do the following:
486 * - map "cmpw*" into "cmpd*"
487 * - map "srwi" into "srdi"
488 * Perhaps surprisingly, this is enough to permit lots of code to run in 64-bit mode, as
489 * long as it is written with this in mind.
490 */
491static void
492commpage_port_32_to_64(
493 int address,
494 int length )
495{
496 uint32_t *ptr, search_mask, search, replace_mask, replace;
497
498 ptr = commpage_addr_of(address);
499
500 search_mask = 0xFC2007FE; // search x-form opcode bits (and L bit)
501 search = 0x7C000000; // for a CMPW
502 replace_mask = 0x00200000; // replace bit 10 (L)...
503 replace = 0x00200000; // ...with a 1-bit, converting word to doubleword compares
504 commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
505
506 search_mask = 0xFC2007FE; // search x-form opcode bits (and L bit)
507 search = 0x7C000040; // for a CMPLW
508 replace_mask = 0x00200000; // replace bit 10 (L)...
509 replace = 0x00200000; // ...with a 1-bit, converting word to doubleword compares
510 commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
511
512 search_mask = 0xFC200000; // search d-form opcode bits (and L bit)
513 search = 0x28000000; // for a CMPLWI
514 replace_mask = 0x00200000; // replace bit 10 (L)...
515 replace = 0x00200000; // ...with a 1-bit, converting word to doubleword compares
516 commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
517
518 search_mask = 0xFC200000; // search d-form opcode bits (and L bit)
519 search = 0x2C000000; // for a CMPWI
520 replace_mask = 0x00200000; // replace bit 10 (L)...
521 replace = 0x00200000; // ...with a 1-bit, converting word to doubleword compares
522 commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
523
524 search_mask = 0xFC00003E; // search d-form opcode bits and ME (mask end) field
525 search = 0x5400003E; // for an RLWINM with ME=31 (which might be a "srwi")
526 replace_mask = 0xFC00003E; // then replace RLWINM's opcode and ME field to make a RLDICL
527 replace = 0x78000002; // opcode is 30, ME is 0, except we add 32 to SH amount
528 commpage_change(ptr,length,search_mask,search,replace_mask,replace,commpage_srwi);
529}
530
531
/* Copy a routine into comm page if it matches running machine.
 *
 * Uses the static variables "cur_routine" and "matched" to verify that,
 * among consecutive descriptors for the same commpage address, exactly one
 * candidate matches this machine's _cpu_capabilities.  Descriptors for the
 * same address must therefore be adjacent in the "routines" table.
 */
static	void
commpage_stuff_routine(
    commpage_descriptor	*rd,
    int			mode )		// kCommPage32 or kCommPage64
{
    char	*routine_code;
    int		must,cant;

    if ( (rd->special & mode) == 0 )	// is this routine useable in this mode?
        return;

    /* Starting a new slot: the previous slot must have found its match. */
    if (rd->commpage_address != cur_routine) {
        if ((cur_routine!=0) && (matched==0))
            panic("commpage no match for last, next address %08x", rd->commpage_address);
        cur_routine = rd->commpage_address;
        matched = 0;
    }

    /* All "musthave" capability bits must be present, no "canthave" bits. */
    must = _cpu_capabilities & rd->musthave;
    cant = _cpu_capabilities & rd->canthave;

    if ((must == rd->musthave) && (cant == 0)) {
        if (matched)
            panic("commpage multiple matches for address %08x", rd->commpage_address);
        matched = 1;
        routine_code = ((char*)rd) + rd->code_offset;

        commpage_stuff(rd->commpage_address,routine_code,rd->code_length);

        /* Post-process the copied code for this specific platform. */
        if (rd->special & kCommPageDCBA)
            commpage_handle_dcbas(rd->commpage_address,rd->code_length);

        if (rd->special & kCommPageSYNC)
            commpage_handle_syncs(rd->commpage_address,rd->code_length);

        if (rd->special & kCommPageISYNC)
            commpage_handle_isyncs(rd->commpage_address,rd->code_length);

        if (rd->special & kCommPageMTCRF)
            commpage_handle_mtcrfs(rd->commpage_address,rd->code_length);

        if ((mode == kCommPage64) && (rd->special & kPort32to64))
            commpage_port_32_to_64(rd->commpage_address,rd->code_length);
    }
}
579
580
581/* Fill in the 32- or 64-bit commpage. Called once for each. */
582
583static void
584commpage_populate_one(
585 vm_map_t submap, // the map to populate
586 char ** kernAddressPtr, // address within kernel of this commpage
587 int mode, // either kCommPage32 or kCommPage64
588 const char* signature ) // "commpage 32-bit" or "commpage 64-bit"
589{
590 char c1;
591 short c2;
592 addr64_t c8;
593 static double two52 = 1048576.0 * 1048576.0 * 4096.0; // 2**52
594 static double ten6 = 1000000.0; // 10**6
595 static uint64_t magicFE = 0xFEFEFEFEFEFEFEFFLL; // used to find 0s in strings
596 static uint64_t magic80 = 0x8080808080808080LL; // also used to find 0s
597 commpage_descriptor **rd;
598 short version = _COMM_PAGE_THIS_VERSION;
599
600 next = NULL; // initialize next available byte in the commpage
601 cur_routine = 0; // initialize comm page address of "current" routine
602
603 commPagePtr = (char*) commpage_allocate( submap );
604 *kernAddressPtr = commPagePtr; // save address either in commPagePtr32 or 64
605
606 /* Stuff in the constants. We move things into the comm page in strictly
607 * ascending order, so we can check for overlap and panic if so.
608 */
609
610 commpage_stuff(_COMM_PAGE_SIGNATURE,signature,strlen(signature));
611
612 commpage_stuff(_COMM_PAGE_VERSION,&version,2);
613
614 commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(int));
615
616 c1 = (_cpu_capabilities & kHasAltivec) ? -1 : 0;
617 commpage_stuff(_COMM_PAGE_ALTIVEC,&c1,1);
618
619 c1 = (_cpu_capabilities & k64Bit) ? -1 : 0;
620 commpage_stuff(_COMM_PAGE_64_BIT,&c1,1);
621
622 if (_cpu_capabilities & kCache32)
623 c2 = 32;
624 else if (_cpu_capabilities & kCache64)
625 c2 = 64;
626 else if (_cpu_capabilities & kCache128)
627 c2 = 128;
628 commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2);
629
630 commpage_stuff(_COMM_PAGE_2_TO_52,&two52,8);
631 commpage_stuff(_COMM_PAGE_10_TO_6,&ten6,8);
632 commpage_stuff(_COMM_PAGE_MAGIC_FE,&magicFE,8);
633 commpage_stuff(_COMM_PAGE_MAGIC_80,&magic80,8);
634
635 c8 = 0; // 0 timestamp means "disabled"
636 commpage_stuff(_COMM_PAGE_TIMEBASE,&c8,8);
637 commpage_stuff(_COMM_PAGE_TIMESTAMP,&c8,8);
638 commpage_stuff(_COMM_PAGE_SEC_PER_TICK,&c8,8);
639
640 /* Now the routines. We try each potential routine in turn,
641 * and copy in any that "match" the platform we are running on.
642 * We require that exactly one routine match for each slot in the
643 * comm page, and panic if not.
644 */
645
646 for( rd = routines; *rd != NULL ; rd++ )
647 commpage_stuff_routine(*rd,mode);
648
649 if (!matched)
650 panic("commpage no match on last routine");
651
652 if (next > (commPagePtr + _COMM_PAGE_AREA_USED))
653 panic("commpage overflow");
654
655
656 // make all that new code executable
657
658 sync_cache_virtual((vm_offset_t) commPagePtr,_COMM_PAGE_AREA_USED);
659}
660
661
662/* Fill in commpage: called once, during kernel initialization, from the
663 * startup thread before user-mode code is running.
664 *
665 * See the top of this file for a list of what you have to do to add
666 * a new routine to the commpage.
667 */
668
669void
670commpage_populate( void )
671{
672 commpage_init_cpu_capabilities();
673 commpage_populate_one( commpage32_map, &commPagePtr32, kCommPage32, "commpage 32-bit");
674 if (_cpu_capabilities & k64Bit) {
675 commpage_populate_one( commpage64_map, &commPagePtr64, kCommPage64, "commpage 64-bit");
676 pmap_init_sharedpage((vm_offset_t)commPagePtr64); // Do the 64-bit version
677 }
678
679}