/*
 * Copyright (c) 2003-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Here's what to do if you want to add a new routine to the comm page:
 *
 * 1. Add a definition for its address in osfmk/i386/cpu_capabilities.h,
 *    being careful to reserve room for future expansion.
 *
 * 2. Write one or more versions of the routine, each with its own
 *    commpage_descriptor.  The tricky part is getting the "special",
 *    "musthave", and "canthave" fields right, so that exactly one
 *    version of the routine is selected for every machine.
 *    The source files should be in osfmk/i386/commpage/.
 *    (An illustrative descriptor sketch follows this comment.)
 *
 * 3. Add a ptr to your new commpage_descriptor(s) in the "routines"
 *    array in osfmk/i386/commpage/commpage_asm.s.  There are two
 *    arrays, one for the 32-bit and one for the 64-bit commpage.
 *
 * 4. Write the code in Libc to use the new routine.
 */
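
/* Illustrative sketch (an assumption, not part of the original file): roughly what
 * one descriptor from step 2 above might look like if written as a C initializer.
 * The field names are the ones commpage_stuff_routine() below reads (code_address,
 * code_length, commpage_address, musthave, canthave); the routine name, length, and
 * address constant are hypothetical, and the real descriptors are defined in the
 * assembly sources under osfmk/i386/commpage/ and listed in commpage_asm.s.
 *
 *	extern char bcopy_sse3[];	// hypothetical routine body, assembled elsewhere
 *
 *	commpage_descriptor bcopy_sse3_descriptor = {
 *		.code_address     = bcopy_sse3,		// code to copy into the commpage
 *		.code_length      = 128,		// length in bytes (hypothetical)
 *		.commpage_address = _COMM_PAGE_BCOPY,	// fixed address from cpu_capabilities.h (hypothetical)
 *		.musthave         = kHasSSE3,		// capability bits the machine must have
 *		.canthave         = 0,			// capability bits the machine must not have
 *	};
 */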

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <i386/machine_routines.h>
#include <i386/misc_protos.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <ipc/ipc_port.h>

#include <kern/page_decrypt.h>

/* the lists of commpage routines are in commpage_asm.s */
extern commpage_descriptor* commpage_32_routines[];
extern commpage_descriptor* commpage_64_routines[];

/* translated commpage descriptors from commpage_sigs.c */
extern commpage_descriptor sigdata_descriptor;
extern commpage_descriptor *ba_descriptors[];

extern vm_map_t com_region_map32;   // the shared submap, set up in vm init
extern vm_map_t com_region_map64;   // the shared submap, set up in vm init

char *commPagePtr32 = NULL;         // virtual addr in kernel map of 32-bit commpage
char *commPagePtr64 = NULL;         // ...and of 64-bit commpage

int _cpu_capabilities = 0;          // define the capability vector

int noVMX = 0;      /* if true, do not set kHasAltivec in ppc _cpu_capabilities */

void* dsmos_blobs[3];   /* ptrs to the system integrity data in each commpage */
int dsmos_blob_count = 0;

static uintptr_t next;          // next available byte in comm page
static int cur_routine;         // comm page address of "current" routine
static int matched;             // true if we've found a match for "current" routine

static char *commPagePtr;           // virtual addr in kernel map of commpage we are working on
static size_t commPageBaseOffset;   // add to 32-bit runtime address to get offset in commpage

/* Allocate the commpage and add to the shared submap created by vm:
 *  1. allocate a page in the kernel map (RW)
 *  2. wire it down
 *  3. make a memory entry out of it
 *  4. map that entry into the shared comm region map (R-only)
 */

static void*
commpage_allocate(
    vm_map_t    submap,         // com_region_map32 or com_region_map64
    size_t      area_used )     // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
{
    vm_offset_t     kernel_addr;        // address of commpage in kernel map
    vm_offset_t     zero = 0;
    vm_size_t       size = area_used;   // size actually populated
    vm_map_entry_t  entry;
    ipc_port_t      handle;

    if (submap == NULL)
        panic("commpage submap is null");

    if (vm_allocate(kernel_map,&kernel_addr,area_used,VM_FLAGS_ANYWHERE))
        panic("cannot allocate commpage");

    if (vm_map_wire(kernel_map,kernel_addr,kernel_addr+area_used,VM_PROT_DEFAULT,FALSE))
        panic("cannot wire commpage");

    /*
     * Now that the object is created and wired into the kernel map, mark it so that no delay
     * copy-on-write will ever be performed on it as a result of mapping it into user-space.
     * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and
     * that would be a real disaster.
     *
     * JMM - What we really need is a way to create it like this in the first place.
     */
    if (!vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr), &entry) || entry->is_sub_map)
        panic("cannot find commpage entry");
    entry->object.vm_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

    if (mach_make_memory_entry( kernel_map,     // target map
                &size,              // size
                kernel_addr,        // offset (address in kernel map)
                VM_PROT_DEFAULT,    // map it RW
                &handle,            // this is the object handle we get
                NULL ))             // parent_entry (what is this?)
        panic("cannot make entry for commpage");

    if (vm_map_64(  submap,             // target map (shared submap)
                &zero,              // address (map into 1st page in submap)
                area_used,          // size
                0,                  // mask
                VM_FLAGS_FIXED,     // flags (it must be 1st page in submap)
                handle,             // port is the memory entry we just made
                0,                  // offset (map 1st page in memory entry)
                FALSE,              // copy
                VM_PROT_READ,       // cur_protection (R-only in user map)
                VM_PROT_READ,       // max_protection
                VM_INHERIT_SHARE )) // inheritance
        panic("cannot map commpage");

    ipc_port_release(handle);

    return (void*) kernel_addr;     // return address in kernel map
}

/* Get address (in kernel map) of a commpage field. */

static void*
commpage_addr_of(
    int     addr_at_runtime )
{
    return (void*) ((uintptr_t)commPagePtr + addr_at_runtime - commPageBaseOffset);
}

/* Determine number of CPUs on this system.  We cannot rely on
 * machine_info.max_cpus this early in the boot.
 */
static int
commpage_cpus( void )
{
    int cpus;

    cpus = ml_get_max_cpus();   // NB: this call can block

    if (cpus == 0)
        panic("commpage cpus==0");
    if (cpus > 0xFF)
        cpus = 0xFF;

    return cpus;
}

/* Initialize kernel version of _cpu_capabilities vector (used by KEXTs.) */

static void
commpage_init_cpu_capabilities( void )
{
    int bits;
    int cpus;
    ml_cpu_info_t cpu_info;

    bits = 0;
    ml_cpu_get_info(&cpu_info);

    switch (cpu_info.vector_unit) {
        case 6:
            bits |= kHasSupplementalSSE3;
            /* fall thru */
        case 5:
            bits |= kHasSSE3;
            /* fall thru */
        case 4:
            bits |= kHasSSE2;
            /* fall thru */
        case 3:
            bits |= kHasSSE;
            /* fall thru */
        case 2:
            bits |= kHasMMX;
        default:
            break;
    }
    switch (cpu_info.cache_line_size) {
        case 128:
            bits |= kCache128;
            break;
        case 64:
            bits |= kCache64;
            break;
        case 32:
            bits |= kCache32;
            break;
        default:
            break;
    }
    cpus = commpage_cpus();             // how many CPUs do we have

    if (cpus == 1)
        bits |= kUP;

    bits |= (cpus << kNumCPUsShift);

    bits |= kFastThreadLocalStorage;    // we use %gs for TLS

    if (cpu_mode_is64bit())             // k64Bit means processor is 64-bit capable
        bits |= k64Bit;

    _cpu_capabilities = bits;           // set kernel version for use by drivers etc
}

int
_get_cpu_capabilities()
{
    return _cpu_capabilities;
}

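/* Illustrative sketch (an assumption, not part of the original file): how a driver
 * or KEXT might consult the kernel copy of the capability vector through the
 * accessor above.
 *
 *	if (_get_cpu_capabilities() & kHasSSE2) {
 *		// take an SSE2-optimized path
 *	}
 *	int ncpus = (_get_cpu_capabilities() >> kNumCPUsShift) & 0xFF;
 */
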
/* Copy data into commpage. */

static void
commpage_stuff(
    int address,
    const void *source,
    int length )
{
    void *dest = commpage_addr_of(address);

    if ((uintptr_t)dest < next)
        panic("commpage overlap at address 0x%x, 0x%x < 0x%x", address, dest, next);

    bcopy(source,dest,length);

    next = ((uintptr_t)dest + length);
}

/* Copy data, byte-swapped, into the area of the legacy commpage used by
 * translated processes (at _COMM_PAGE_SIGS_OFFSET).  A no-op unless "legacy"
 * is set.
 */
static void
commpage_stuff_swap(
    int address,
    void *source,
    int length,
    int legacy )
{
    if ( legacy ) {
        void *dest = commpage_addr_of(address);
        dest = (void *)((uintptr_t) dest + _COMM_PAGE_SIGS_OFFSET);
        switch (length) {
            case 2:
                OSWriteSwapInt16(dest, 0, *(uint16_t *)source);
                break;
            case 4:
                OSWriteSwapInt32(dest, 0, *(uint32_t *)source);
                break;
            case 8:
                OSWriteSwapInt64(dest, 0, *(uint64_t *)source);
                break;
        }
    }
}

/* Copy data into the commpage, both natively and (if legacy) byte-swapped. */

static void
commpage_stuff2(
    int address,
    void *source,
    int length,
    int legacy )
{
    commpage_stuff_swap(address, source, length, legacy);
    commpage_stuff(address, source, length);
}

/* Copy a routine into comm page if it matches running machine.
 */
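/* Worked example (illustrative, not from the original file): two descriptors for the
 * same commpage_address, one with musthave = kHasSSE2 / canthave = 0 and the other
 * with musthave = 0 / canthave = kHasSSE2, select exactly one version of the routine
 * on any machine; matching neither (or both) triggers one of the panics below.
 */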
static void
commpage_stuff_routine(
    commpage_descriptor *rd )
{
    int must,cant;

    if (rd->commpage_address != cur_routine) {
        if ((cur_routine!=0) && (matched==0))
            panic("commpage no match for last, next address %08x", rd->commpage_address);
        cur_routine = rd->commpage_address;
        matched = 0;
    }

    must = _cpu_capabilities & rd->musthave;
    cant = _cpu_capabilities & rd->canthave;

    if ((must == rd->musthave) && (cant == 0)) {
        if (matched)
            panic("commpage multiple matches for address %08x", rd->commpage_address);
        matched = 1;

        commpage_stuff(rd->commpage_address,rd->code_address,rd->code_length);
    }
}

/* Fill in the 32- or 64-bit commpage.  Called once for each.
 * The 32-bit ("legacy") commpage has a bunch of stuff added to it
 * for translated processes, some of which is byte-swapped.
 */

static void
commpage_populate_one(
    vm_map_t    submap,         // com_region_map32 or com_region_map64
    char **     kernAddressPtr, // &commPagePtr32 or &commPagePtr64
    size_t      area_used,      // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
    size_t      base_offset,    // will become commPageBaseOffset
    commpage_descriptor** commpage_routines, // list of routine ptrs for this commpage
    boolean_t   legacy,         // true if 32-bit commpage
    const char* signature )     // "commpage 32-bit" or "commpage 64-bit"
{
    short c2;
    static double two52 = 1048576.0 * 1048576.0 * 4096.0;  // 2**52
    static double ten6 = 1000000.0;                         // 10**6
    commpage_descriptor **rd;
    short version = _COMM_PAGE_THIS_VERSION;
    int swapcaps;

    next = (uintptr_t) NULL;
    cur_routine = 0;
    commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used );
    *kernAddressPtr = commPagePtr;      // save address either in commPagePtr32 or 64
    commPageBaseOffset = base_offset;

    /* Stuff in the constants.  We move things into the comm page in strictly
     * ascending order, so we can check for overlap and panic if so.
     */
    commpage_stuff(_COMM_PAGE_SIGNATURE,signature,strlen(signature));
    commpage_stuff2(_COMM_PAGE_VERSION,&version,sizeof(short),legacy);
    commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(int));

    /* excuse our magic constants, we cannot include ppc/cpu_capabilities.h */
    /* always set kCache32 and kDcbaAvailable */
    swapcaps = 0x44;
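    /* (0x44 is presumably ppc kCache32 (0x04) | kDcbaAvailable (0x40), per the comment above) */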
    if ( _cpu_capabilities & kUP )
        swapcaps |= (kUP + (1 << kNumCPUsShift));
    else
        swapcaps |= 2 << kNumCPUsShift;     /* limit #cpus to 2 */
    if ( ! noVMX )          /* if rosetta will be emulating altivec... */
        swapcaps |= 0x101;  /* ...then set kHasAltivec and kDataStreamsAvailable too */
    commpage_stuff_swap(_COMM_PAGE_CPU_CAPABILITIES, &swapcaps, sizeof(int), legacy);
    c2 = 32;
    commpage_stuff_swap(_COMM_PAGE_CACHE_LINESIZE,&c2,2,legacy);

    if (_cpu_capabilities & kCache32)
        c2 = 32;
    else if (_cpu_capabilities & kCache64)
        c2 = 64;
    else if (_cpu_capabilities & kCache128)
        c2 = 128;
    commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2);

    if ( legacy ) {
        commpage_stuff2(_COMM_PAGE_2_TO_52,&two52,8,legacy);
        commpage_stuff2(_COMM_PAGE_10_TO_6,&ten6,8,legacy);
    }

    for( rd = commpage_routines; *rd != NULL ; rd++ )
        commpage_stuff_routine(*rd);

    if (!matched)
        panic("commpage no match on last routine");

    if (next > (uintptr_t)_COMM_PAGE_END)
        panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%08x", next, (uintptr_t)commPagePtr);

    if ( legacy ) {
        next = (uintptr_t) NULL;
        for( rd = ba_descriptors; *rd != NULL ; rd++ )
            commpage_stuff_routine(*rd);

        next = (uintptr_t) NULL;
        commpage_stuff_routine(&sigdata_descriptor);
    }

    /* salt away a ptr to the system integrity data in this commpage */
    dsmos_blobs[dsmos_blob_count++] =
        commpage_addr_of( _COMM_PAGE_SYSTEM_INTEGRITY );
}


/* Fill in commpages: called once, during kernel initialization, from the
 * startup thread before user-mode code is running.
 *
 * See the top of this file for a list of what you have to do to add
 * a new routine to the commpage.
 */

void
commpage_populate( void )
{
    commpage_init_cpu_capabilities();

    commpage_populate_one( com_region_map32,
                           &commPagePtr32,
                           _COMM_PAGE32_AREA_USED,
                           _COMM_PAGE32_BASE_ADDRESS,
                           commpage_32_routines,
                           TRUE,        /* legacy (32-bit) commpage */
                           "commpage 32-bit");
    pmap_commpage32_init((vm_offset_t) commPagePtr32, _COMM_PAGE32_BASE_ADDRESS,
                         _COMM_PAGE32_AREA_USED/INTEL_PGBYTES);

    if (_cpu_capabilities & k64Bit) {
        commpage_populate_one( com_region_map64,
                               &commPagePtr64,
                               _COMM_PAGE64_AREA_USED,
                               _COMM_PAGE32_START_ADDRESS, /* because kernel is built 32-bit */
                               commpage_64_routines,
                               FALSE,   /* not a legacy commpage */
                               "commpage 64-bit");
        pmap_commpage64_init((vm_offset_t) commPagePtr64, _COMM_PAGE64_BASE_ADDRESS,
                             _COMM_PAGE64_AREA_USED/INTEL_PGBYTES);
    }

    rtc_nanotime_init_commpage();
}