osfmk/ppc/commpage/commpage.c
/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * Here's what to do if you want to add a new routine to the comm page:
 *
 * 1. Add a definition for its address in osfmk/ppc/cpu_capabilities.h,
 *    being careful to reserve room for future expansion.
 *
 * 2. Write one or more versions of the routine, each with its own
 *    commpage_descriptor.  The tricky part is getting the "special",
 *    "musthave", and "canthave" fields right, so that exactly one
 *    version of the routine is selected for every machine.
 *    The source files should be in osfmk/ppc/commpage/.
 *
 * 3. Add a ptr to your new commpage_descriptor(s) in the "routines"
 *    array in commpage_populate().  Of course, you'll also have to
 *    declare them "extern" in commpage_populate().
 *
 * 4. Write the code in Libc to use the new routine.
 *
 * A schematic example of steps 2 and 3 follows this comment.
 */
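
/* Illustrative sketch (not part of the build): what steps 2 and 3 might look
 * like for a hypothetical routine.  The slot name _COMM_PAGE_MY_ROUTINE and
 * the routine names below are made up for illustration; the descriptor
 * fields mentioned (commpage_address, musthave, canthave, special,
 * code_offset, code_length) are the ones this file actually reads.  Real
 * descriptors live alongside the assembly routines in osfmk/ppc/commpage/.
 *
 *  // step 2: one version per class of machine, e.g. an MP and a UP flavor
 *  //   my_routine_mp: commpage_address = _COMM_PAGE_MY_ROUTINE,
 *  //                  musthave = 0,   canthave = kUP
 *  //   my_routine_up: commpage_address = _COMM_PAGE_MY_ROUTINE,
 *  //                  musthave = kUP, canthave = 0
 *
 *  // step 3: declare both "extern commpage_descriptor ..." in
 *  // commpage_populate() and add &my_routine_mp and &my_routine_up to the
 *  // "routines" array, keeping the array sorted by commpage_address.
 */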

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <ppc/exception.h>
#include <ppc/machine_routines.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/pmap.h>
#include <vm/vm_kern.h>
#include <mach/vm_map.h>

static char *next = NULL;           // next available byte in comm page
static int  cur_routine = 0;        // comm page address of "current" routine
static int  matched;                // true if we've found a match for "current" routine

int     _cpu_capabilities = 0;      // define the capability vector

char    *commPagePtr = NULL;        // virtual address of comm page in kernel map


/* Allocate the commpages and add to the shared submap created by vm:
 *  1. allocate pages in the kernel map (RW)
 *  2. wire them down
 *  3. make a memory entry out of them
 *  4. map that entry into the shared comm region map (R-only)
 */
static void*
commpage_allocate( void )
{
    extern vm_map_t com_region_map;             // the shared submap, set up in vm init
    vm_offset_t     kernel_addr;                // address of commpage in kernel map
    vm_offset_t     zero = 0;
    vm_size_t       size = _COMM_PAGE_AREA_USED;    // size actually populated
    ipc_port_t      handle;

    if (com_region_map == NULL)
        panic("commpage map is null");

    if (vm_allocate(kernel_map,&kernel_addr,_COMM_PAGE_AREA_USED,VM_FLAGS_ANYWHERE))
        panic("cannot allocate commpage");

    if (vm_map_wire(kernel_map,kernel_addr,kernel_addr+_COMM_PAGE_AREA_USED,VM_PROT_DEFAULT,FALSE))
        panic("cannot wire commpage");

    if (mach_make_memory_entry( kernel_map,     // target map
                &size,                          // size
                kernel_addr,                    // offset (address in kernel map)
                VM_PROT_DEFAULT,                // map it RW
                &handle,                        // this is the object handle we get
                NULL ))                         // parent_entry
        panic("cannot make entry for commpage");

    if (vm_map_64(  com_region_map,             // target map (shared submap)
                &zero,                          // address (map into 1st page in submap)
                _COMM_PAGE_AREA_USED,           // size
                0,                              // mask
                VM_FLAGS_FIXED,                 // flags (it must be 1st page in submap)
                handle,                         // port is the memory entry we just made
                0,                              // offset (map 1st page in memory entry)
                FALSE,                          // copy
                VM_PROT_READ,                   // cur_protection (R-only in user map)
                VM_PROT_READ,                   // max_protection
                VM_INHERIT_SHARE ))             // inheritance
        panic("cannot map commpage");

    ipc_port_release(handle);

    return (void*) kernel_addr;                 // return address in kernel map
}
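
/* Net effect of commpage_allocate(): the same wired pages end up mapped
 * twice -- RW at kernel_addr in the kernel map, which is what the rest of
 * this file writes through, and R-only at offset 0 of com_region_map, the
 * submap that vm shares into user address spaces.  User code only ever
 * sees the read-only mapping.
 */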


/* Get address (in kernel map) of a commpage field. */

static void*
commpage_addr_of(
    int     addr_at_runtime )
{
    return (void*) (commPagePtr + addr_at_runtime - _COMM_PAGE_BASE_ADDRESS);
}


/* Determine number of CPUs on this system.  We cannot rely on
 * machine_info.max_cpus this early in the boot.
 */
static int
commpage_cpus( void )
{
    int cpus;

    cpus = ml_get_max_cpus();                   // NB: this call can block

    if (cpus == 0)
        panic("commpage cpus==0");
    if (cpus > 0xFF)
        cpus = 0xFF;

    return cpus;
}


/* Initialize kernel version of _cpu_capabilities vector (used by KEXTs.) */

static void
commpage_init_cpu_capabilities( void )
{
    struct per_proc_info    *pp;
    procFeatures            *pfp;
    int                     cpus;
    int                     available;

    pp = per_proc_info;                 // use CPU 0's per-proc
    pfp = &pp->pf;                      // point to features in per-proc
    available = pfp->Available;

    // If AltiVec is disabled make sure it is not reported as available.
    if ((available & pfAltivec) == 0) {
        _cpu_capabilities &= ~kHasAltivec;
    }

    if (_cpu_capabilities & kDcbaAvailable) {       // if this processor has DCBA, time it...
        _cpu_capabilities |= commpage_time_dcba();  // ...and set kDcbaRecommended if it helps
    }

    cpus = commpage_cpus();             // how many CPUs do we have
    if (cpus == 1)
        _cpu_capabilities |= kUP;
    _cpu_capabilities |= (cpus << kNumCPUsShift);
}
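
/* For example, on a 2-CPU machine the code above leaves kUP clear and ORs
 * (2 << kNumCPUsShift) into the CPU-count field of _cpu_capabilities; on a
 * single-CPU machine kUP is set and the count field holds 1.
 */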


/* Copy data into commpage. */

void
commpage_stuff(
    int     address,
    void    *source,
    int     length )
{
    char    *dest = commpage_addr_of(address);

    if (dest < next)
        panic("commpage overlap: %08X - %08X", dest, next);

    bcopy((char*)source,dest,length);

    next = (dest + length);
}


/* Modify commpage code in-place for this specific platform. */

static void
commpage_change(
    uint32_t    *ptr,
    int         bytes,
    uint32_t    search_mask,
    uint32_t    search_pattern,
    uint32_t    new_mask,
    uint32_t    new_pattern,
    int         (*check)(uint32_t instruction) )
{
    int         words = bytes >> 2;
    uint32_t    word;
    int         found_one = 0;

    while( (--words) >= 0 ) {
        word = *ptr;
        if ((word & search_mask) == search_pattern) {
            if ((check==NULL) || (check(word))) {   // check instruction if necessary
                found_one = 1;
                word &= ~new_mask;
                word |= new_pattern;
                *ptr = word;
            }
        }
        ptr++;
    }

    if (!found_one)
        panic("commpage opcode not found");
}


/* Check to see if exactly one bit is set in a MTCRF instruction's FXM field.
 */
static int
commpage_onebit(
    uint32_t    mtcrf )
{
    int x = (mtcrf >> 12) & 0xFF;       // isolate the FXM field of the MTCRF

    if (x == 0)
        panic("commpage bad mtcrf");

    return (x & (x-1)) == 0 ? 1 : 0;    // return 1 iff exactly 1 bit set in FXM field
}
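
/* The test above uses the clear-lowest-set-bit identity: (x & (x-1)) removes
 * the least-significant 1 bit of x, so the result is zero exactly when x is
 * a power of two.  For example, FXM = 0x20 gives 0x20 & 0x1F == 0 (one bit
 * set), while FXM = 0x30 gives 0x30 & 0x2F == 0x20 != 0 (more than one).
 */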


/* Handle kCommPageDCBA bit: the routine being installed uses DCBA instructions.
 * If the machine we're running on doesn't benefit from that instruction, map
 * them to NOPs in the commpage.
 */
static void
commpage_handle_dcbas(
    int     address,
    int     length )
{
    uint32_t    *ptr, search_mask, search, replace_mask, replace;

    if ((_cpu_capabilities & kDcbaAvailable) == 0) {
        ptr = commpage_addr_of(address);

        search_mask     = 0xFC0007FE;   // search x-form opcode bits
        search          = 0x7C0005EC;   // for a DCBA
        replace_mask    = 0xFFFFFFFF;   // replace all bits...
        replace         = 0x60000000;   // ...with a NOP

        commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
    }
}
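
/* Worked example of the patch above: "dcba 0,r4" assembles to 0x7C0025EC;
 * masking with 0xFC0007FE keeps only the primary and extended opcode fields,
 * yielding 0x7C0005EC, so it matches, and the whole word is rewritten to
 * 0x60000000 ("ori 0,0,0", i.e. a nop).
 */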


/* Handle kCommPageSYNC bit: the routine being installed uses SYNC or LWSYNC.
 * If we're running on a UP machine, map them to NOPs.
 */
static void
commpage_handle_syncs(
    int     address,
    int     length )
{
    uint32_t    *ptr, search_mask, search, replace_mask, replace;

    if (_NumCPUs() == 1) {
        ptr = commpage_addr_of(address);

        search_mask     = 0xFC0007FE;   // search x-form opcode bits
        search          = 0x7C0004AC;   // for a SYNC or LWSYNC
        replace_mask    = 0xFFFFFFFF;   // replace all bits...
        replace         = 0x60000000;   // ...with a NOP

        commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
    }
}
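
/* One search pattern covers both barriers because sync and lwsync share the
 * same primary and extended opcode (0x7C0004AC) and differ only in the L
 * field, which lies in bits the 0xFC0007FE mask ignores.
 */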


/* Handle kCommPageMTCRF bit.  When this was written (3/03), the assembler did not
 * recognize the special form of MTCRF instructions, in which exactly one bit is set
 * in the 8-bit mask field.  Bit 11 of the instruction should be set in this case,
 * since the 970 and probably other 64-bit processors optimize it.  Once the assembler
 * has been updated this code can be removed, though it need not be.
 */
static void
commpage_handle_mtcrfs(
    int     address,
    int     length )
{
    uint32_t    *ptr, search_mask, search, replace_mask, replace;

    if (_cpu_capabilities & k64Bit) {
        ptr = commpage_addr_of(address);

        search_mask     = 0xFC0007FE;   // search x-form opcode bits
        search          = 0x7C000120;   // for a MTCRF
        replace_mask    = 0x00100000;   // replace bit 11...
        replace         = 0x00100000;   // ...with a 1-bit

        commpage_change(ptr,length,search_mask,search,replace_mask,replace,commpage_onebit);
    }
}
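
/* "Bit 11" above is in big-endian PowerPC bit numbering (bit 0 is the MSB),
 * so it corresponds to the mask value 1 << (31 - 11) == 0x00100000 used for
 * replace_mask and replace.  commpage_onebit() gates the patch so only the
 * single-CR-field form of mtcrf gets the bit set.
 */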


/* Copy a routine into comm page if it matches running machine.
 */
static void
commpage_stuff_routine(
    commpage_descriptor *rd )
{
    char    *routine_code;
    int     must,cant;

    if (rd->commpage_address != cur_routine) {
        if ((cur_routine != 0) && (matched == 0))
            panic("commpage no match");
        cur_routine = rd->commpage_address;
        matched = 0;
    }

    must = _cpu_capabilities & rd->musthave;
    cant = _cpu_capabilities & rd->canthave;

    if ((must == rd->musthave) && (cant == 0)) {
        if (matched)
            panic("commpage duplicate matches");
        matched = 1;
        routine_code = ((char*)rd) + rd->code_offset;

        commpage_stuff(rd->commpage_address,routine_code,rd->code_length);

        if (rd->special & kCommPageDCBA)
            commpage_handle_dcbas(rd->commpage_address,rd->code_length);

        if (rd->special & kCommPageSYNC)
            commpage_handle_syncs(rd->commpage_address,rd->code_length);

        if (rd->special & kCommPageMTCRF)
            commpage_handle_mtcrfs(rd->commpage_address,rd->code_length);
    }
}
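
/* Selection example (following from the kUP logic above): a descriptor whose
 * musthave includes kUP matches only on single-CPU machines, while a
 * companion descriptor for the same commpage address whose canthave includes
 * kUP matches only on multi-CPU machines.  For each address, exactly one
 * descriptor must match or the panics above fire.
 */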


/* Fill in commpage: called once, during kernel initialization, from the
 * startup thread before user-mode code is running.
 * See the top of this file for a list of what you have to do to add
 * a new routine to the commpage.
 */
void
commpage_populate( void )
{
    char        c1;
    short       c2;
    addr64_t    c8;
    static double two52 = 1048576.0 * 1048576.0 * 4096.0;  // 2**52
    static double ten6  = 1000000.0;                        // 10**6
    commpage_descriptor **rd;
    short       version = _COMM_PAGE_THIS_VERSION;


    commPagePtr = (char*) commpage_allocate();

    commpage_init_cpu_capabilities();


    /* Stuff in the constants.  We move things into the comm page in strictly
     * ascending order, so we can check for overlap and panic if so.
     */

    commpage_stuff(_COMM_PAGE_VERSION,&version,2);

    commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(int));

    c1 = (_cpu_capabilities & kHasAltivec) ? -1 : 0;
    commpage_stuff(_COMM_PAGE_ALTIVEC,&c1,1);

    c1 = (_cpu_capabilities & k64Bit) ? -1 : 0;
    commpage_stuff(_COMM_PAGE_64_BIT,&c1,1);

    if (_cpu_capabilities & kCache32)
        c2 = 32;
    else if (_cpu_capabilities & kCache64)
        c2 = 64;
    else if (_cpu_capabilities & kCache128)
        c2 = 128;
    commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2);

    commpage_stuff(_COMM_PAGE_2_TO_52,&two52,8);

    commpage_stuff(_COMM_PAGE_10_TO_6,&ten6,8);

    c8 = 0;                                     // 0 timestamp means "disabled"
    commpage_stuff(_COMM_PAGE_TIMEBASE,&c8,8);
    commpage_stuff(_COMM_PAGE_TIMESTAMP,&c8,8);
    commpage_stuff(_COMM_PAGE_SEC_PER_TICK,&c8,8);


    /* Now the routines.  We try each potential routine in turn,
     * and copy in any that "match" the platform we are running on.
     * We require that exactly one routine match for each slot in the
     * comm page, and panic if not.
     *
     * The check for overlap assumes that these routines are
     * in strictly ascending order, sorted by address in the
     * comm page.
     */

    extern  commpage_descriptor mach_absolute_time_32;
    extern  commpage_descriptor mach_absolute_time_64;
    extern  commpage_descriptor spinlock_32_try_mp;
    extern  commpage_descriptor spinlock_32_try_up;
    extern  commpage_descriptor spinlock_64_try_mp;
    extern  commpage_descriptor spinlock_64_try_up;
    extern  commpage_descriptor spinlock_32_lock_mp;
    extern  commpage_descriptor spinlock_32_lock_up;
    extern  commpage_descriptor spinlock_64_lock_mp;
    extern  commpage_descriptor spinlock_64_lock_up;
    extern  commpage_descriptor spinlock_32_unlock_mp;
    extern  commpage_descriptor spinlock_32_unlock_up;
    extern  commpage_descriptor spinlock_64_unlock_mp;
    extern  commpage_descriptor spinlock_64_unlock_up;
    extern  commpage_descriptor pthread_getspecific_sprg3;
    extern  commpage_descriptor pthread_getspecific_uftrap;
    extern  commpage_descriptor gettimeofday_32;
    extern  commpage_descriptor gettimeofday_64;
    extern  commpage_descriptor commpage_flush_dcache;
    extern  commpage_descriptor commpage_flush_icache;
    extern  commpage_descriptor pthread_self_sprg3;
    extern  commpage_descriptor pthread_self_uftrap;
    extern  commpage_descriptor spinlock_relinquish;
    extern  commpage_descriptor bzero_32;
    extern  commpage_descriptor bzero_128;
    extern  commpage_descriptor bcopy_g3;
    extern  commpage_descriptor bcopy_g4;
    extern  commpage_descriptor bcopy_970;
    extern  commpage_descriptor bcopy_64;
    extern  commpage_descriptor bigcopy_970;

    static  commpage_descriptor *routines[] = {
        &mach_absolute_time_32,
        &mach_absolute_time_64,
        &spinlock_32_try_mp,
        &spinlock_32_try_up,
        &spinlock_64_try_mp,
        &spinlock_64_try_up,
        &spinlock_32_lock_mp,
        &spinlock_32_lock_up,
        &spinlock_64_lock_mp,
        &spinlock_64_lock_up,
        &spinlock_32_unlock_mp,
        &spinlock_32_unlock_up,
        &spinlock_64_unlock_mp,
        &spinlock_64_unlock_up,
        &pthread_getspecific_sprg3,
        &pthread_getspecific_uftrap,
        &gettimeofday_32,
        &gettimeofday_64,
        &commpage_flush_dcache,
        &commpage_flush_icache,
        &pthread_self_sprg3,
        &pthread_self_uftrap,
        &spinlock_relinquish,
        &bzero_32,
        &bzero_128,
        &bcopy_g3,
        &bcopy_g4,
        &bcopy_970,
        &bcopy_64,
        &bigcopy_970,
        NULL };

    for( rd = routines; *rd != NULL ; rd++ )
        commpage_stuff_routine(*rd);

    if (!matched)
        panic("commpage no match on last routine");

    if (next > (commPagePtr + _COMM_PAGE_AREA_USED))
        panic("commpage overflow");

    sync_cache_virtual((vm_offset_t) commPagePtr,_COMM_PAGE_AREA_USED);     // make all that new code executable

}