1/*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24/*
25 * Shared region (... and comm page)
26 *
27 * This file handles the VM shared region and comm page.
28 *
29 */
30/*
31 * SHARED REGIONS
32 * --------------
33 *
34 * A shared region is a submap that contains the most common system shared
35 * libraries for a given environment.
36 * An environment is defined by (cpu-type, 64-bitness, root directory).
37 *
38 * The point of a shared region is to reduce the setup overhead when exec'ing
39 * a new process.
40 * A shared region uses a shared VM submap that gets mapped automatically
41 * at exec() time (see vm_map_exec()). The first process of a given
42 * environment sets up the shared region and all further processes in that
43 * environment can re-use that shared region without having to re-create
44 * the same mappings in their VM map. All they need is contained in the shared
45 * region.
46 * It can also share a pmap (mostly for read-only parts but also for the
47 * initial version of some writable parts), which gets "nested" into the
48 * process's pmap. This reduces the number of soft faults: once one process
49 * brings in a page in the shared region, all the other processes can access
50 * it without having to enter it in their own pmap.
51 *
52 *
53 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
54 * to map the appropriate shared region in the process's address space.
55 * We look up the appropriate shared region for the process's environment.
56 * If we can't find one, we create a new (empty) one and add it to the list.
57 * Otherwise, we just take an extra reference on the shared region we found.
58 *
59 * The "dyld" runtime (mapped into the process's address space at exec() time)
60 * will then use the shared_region_check_np() and shared_region_map_np()
61 * system calls to validate and/or populate the shared region with the
62 * appropriate dyld_shared_cache file.
63 *
64 * The shared region is inherited on fork() and the child simply takes an
65 * extra reference on its parent's shared region.
66 *
67 * When the task terminates, we release a reference on its shared region.
68 * When the last reference is released, we destroy the shared region.
69 *
70 * After a chroot(), the calling process keeps using its original shared region,
71 * since that's what was mapped when it was started. But its children
72 * will use a different shared region, because they need to use the shared
73 * cache that's relative to the new root directory.
74 */
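/*
 * Illustrative sketch only (not compiled): the exec-time flow described
 * above, in terms of the routines defined in this file. Locking, error
 * handling and the actual submap insertion are omitted; see
 * vm_shared_region_enter() below for the real code path.
 *
 *	vm_shared_region_t sr;
 *
 *	// at exec() time, from vm_map_exec():
 *	sr = vm_shared_region_lookup(fsroot, cpu, is_64bit); // find or create; takes a reference
 *	vm_shared_region_set(task, sr);                       // the task keeps that reference
 *	// ... map sr's submap into the task's VM map ...
 *
 *	// later, when the task's reference is no longer needed:
 *	vm_shared_region_deallocate(sr);                      // destroys it if this was the last reference
 */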
75/*
76 * COMM PAGE
77 *
78 * A "comm page" is an area of memory that is populated by the kernel with
79 * the appropriate platform-specific version of some commonly used code.
80 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
81 * for the native cpu-type. No need to overly optimize translated code
82 * for hardware that is not really there !
83 *
84 * The comm pages are created and populated at boot time.
85 *
86 * The appropriate comm page is mapped into a process's address space
87 * at exec() time, in vm_map_exec().
88 * It is then inherited on fork().
89 *
90 * The comm page is shared between the kernel and all applications of
91 * a given platform. Only the kernel can modify it.
92 *
93 * Applications just branch to fixed addresses in the comm page and find
94 * the right version of the code for the platform. There is also some
95 * data provided and updated by the kernel for processes to retrieve easily
96 * without having to do a system call.
97 */
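/*
 * Illustrative sketch only (not compiled): the user-space view of the
 * comm page. The slot name and flag below are the usual ones from
 * <machine/cpu_capabilities.h>; treat them as an example, not a contract.
 *
 *	#include <machine/cpu_capabilities.h>
 *
 *	uint32_t caps = *(volatile uint32_t *) _COMM_PAGE_CPU_CAPABILITIES;
 *	if (caps & k64Bit) {
 *		// ... the CPU can run 64-bit code ...
 *	}
 *
 * No system call is needed: the page is mapped at a fixed address at
 * exec() time and kept up to date by the kernel.
 */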
98
99#include <debug.h>
100
101#include <kern/ipc_tt.h>
102#include <kern/kalloc.h>
103#include <kern/thread_call.h>
104
105#include <mach/mach_vm.h>
106
107#include <vm/vm_map.h>
108#include <vm/vm_shared_region.h>
109
110#include <vm/vm_protos.h>
111
112#include <machine/commpage.h>
113#include <machine/cpu_capabilities.h>
114
115/* "dyld" uses this to figure out what the kernel supports */
116int shared_region_version = 3;
117
118/* trace level, output is sent to the system log file */
119int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;
120
121/* should local (non-chroot) shared regions persist when no task uses them ? */
122int shared_region_persistence = 0; /* no by default */
123
124/* delay before reclaiming an unused shared region */
125int shared_region_destroy_delay = 120; /* in seconds */
126
127/* this lock protects all the shared region data structures */
128lck_grp_t *vm_shared_region_lck_grp;
129lck_mtx_t vm_shared_region_lock;
130
131#define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
132#define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
133#define vm_shared_region_sleep(event, interruptible) \
134 lck_mtx_sleep(&vm_shared_region_lock, \
135 LCK_SLEEP_DEFAULT, \
136 (event_t) (event), \
137 (interruptible))
138
139/* the list of currently available shared regions (one per environment) */
140queue_head_t vm_shared_region_queue;
141
142static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
143static vm_shared_region_t vm_shared_region_create(
144 void *root_dir,
145 cpu_type_t cputype,
146 boolean_t is_64bit);
147static void vm_shared_region_destroy(vm_shared_region_t shared_region);
148
149static void vm_shared_region_timeout(thread_call_param_t param0,
150 thread_call_param_t param1);
151
152static int __commpage_setup = 0;
153#if defined(__i386__) || defined(__x86_64__)
154static int __system_power_source = 1; /* init to external power source */
155static void post_sys_powersource_internal(int i, int internal);
156#endif /* __i386__ || __x86_64__ */
157
158
159/*
160 * Initialize the module...
161 */
162void
163vm_shared_region_init(void)
164{
165 SHARED_REGION_TRACE_DEBUG(
166 ("shared_region: -> init\n"));
167
168 vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region",
169 LCK_GRP_ATTR_NULL);
170 lck_mtx_init(&vm_shared_region_lock,
171 vm_shared_region_lck_grp,
172 LCK_ATTR_NULL);
173
174 queue_init(&vm_shared_region_queue);
175
176 SHARED_REGION_TRACE_DEBUG(
177 ("shared_region: <- init\n"));
178}
179
180/*
181 * Retrieve a task's shared region and grab an extra reference to
182 * make sure it doesn't disappear while the caller is using it.
183 * The caller is responsible for consuming that extra reference if
184 * necessary.
185 */
186vm_shared_region_t
187vm_shared_region_get(
188 task_t task)
189{
190 vm_shared_region_t shared_region;
191
192 SHARED_REGION_TRACE_DEBUG(
193 ("shared_region: -> get(%p)\n",
194 task));
195
196 task_lock(task);
197 vm_shared_region_lock();
198 shared_region = task->shared_region;
199 if (shared_region) {
200 assert(shared_region->sr_ref_count > 0);
201 vm_shared_region_reference_locked(shared_region);
202 }
203 vm_shared_region_unlock();
204 task_unlock(task);
205
206 SHARED_REGION_TRACE_DEBUG(
207 ("shared_region: get(%p) <- %p\n",
208 task, shared_region));
209
210 return shared_region;
211}
212
213/*
214 * Get the base address of the shared region.
215 * That's the address at which it needs to be mapped in the process's address
216 * space.
217 * No need to lock since this data is set when the shared region is
218 * created and is never modified after that. The caller must hold an extra
219 * reference on the shared region to prevent it from being destroyed.
220 */
221mach_vm_offset_t
222vm_shared_region_base_address(
223 vm_shared_region_t shared_region)
224{
225 SHARED_REGION_TRACE_DEBUG(
226 ("shared_region: -> base_address(%p)\n",
227 shared_region));
228 assert(shared_region->sr_ref_count > 1);
229 SHARED_REGION_TRACE_DEBUG(
230 ("shared_region: base_address(%p) <- 0x%llx\n",
231 shared_region, (long long)shared_region->sr_base_address));
232 return shared_region->sr_base_address;
233}
234
235/*
236 * Get the size of the shared region.
237 * That's the size that needs to be mapped in the process's address
238 * space.
239 * No need to lock since this data is set when the shared region is
240 * created and is never modified after that. The caller must hold an extra
241 * reference on the shared region to prevent it from being destroyed.
242 */
243mach_vm_size_t
244vm_shared_region_size(
245 vm_shared_region_t shared_region)
246{
247 SHARED_REGION_TRACE_DEBUG(
248 ("shared_region: -> size(%p)\n",
249 shared_region));
250 assert(shared_region->sr_ref_count > 1);
251 SHARED_REGION_TRACE_DEBUG(
252 ("shared_region: size(%p) <- 0x%llx\n",
253 shared_region, (long long)shared_region->sr_size));
254 return shared_region->sr_size;
255}
256
257/*
258 * Get the memory entry of the shared region.
259 * That's the "memory object" that needs to be mapped in the process's address
260 * space.
261 * No need to lock since this data is set when the shared region is
262 * created and is never modified after that. The caller must hold an extra
263 * reference on the shared region to prevent it from being destroyed.
264 */
265ipc_port_t
266vm_shared_region_mem_entry(
267 vm_shared_region_t shared_region)
268{
269 SHARED_REGION_TRACE_DEBUG(
270 ("shared_region: -> mem_entry(%p)\n",
271 shared_region));
272 assert(shared_region->sr_ref_count > 1);
273 SHARED_REGION_TRACE_DEBUG(
274 ("shared_region: mem_entry(%p) <- %p\n",
275 shared_region, shared_region->sr_mem_entry));
276 return shared_region->sr_mem_entry;
277}
278
279/*
280 * Set the shared region the process should use.
281 * A NULL new shared region means that we just want to release the old
282 * shared region.
283 * The caller should already have an extra reference on the new shared region
284 * (if any). We release a reference on the old shared region (if any).
285 */
286void
287vm_shared_region_set(
288 task_t task,
289 vm_shared_region_t new_shared_region)
290{
291 vm_shared_region_t old_shared_region;
292
293 SHARED_REGION_TRACE_DEBUG(
294 ("shared_region: -> set(%p, %p)\n",
295 task, new_shared_region));
296
297 task_lock(task);
298 vm_shared_region_lock();
299
300 old_shared_region = task->shared_region;
301 if (new_shared_region) {
302 assert(new_shared_region->sr_ref_count > 0);
303 }
304
305 task->shared_region = new_shared_region;
306
307 vm_shared_region_unlock();
308 task_unlock(task);
309
310 if (old_shared_region) {
311 assert(old_shared_region->sr_ref_count > 0);
312 vm_shared_region_deallocate(old_shared_region);
313 }
314
315 SHARED_REGION_TRACE_DEBUG(
316 ("shared_region: set(%p) <- old=%p new=%p\n",
317 task, old_shared_region, new_shared_region));
318}
319
320/*
321 * Look up the shared region for the desired environment.
322 * If none is found, create a new (empty) one.
323 * Grab an extra reference on the returned shared region, to make sure
324 * it doesn't get destroyed before the caller is done with it. The caller
325 * is responsible for consuming that extra reference if necessary.
326 */
327vm_shared_region_t
328vm_shared_region_lookup(
329 void *root_dir,
330 cpu_type_t cputype,
331 boolean_t is_64bit)
332{
333 vm_shared_region_t shared_region;
334 vm_shared_region_t new_shared_region;
335
336 SHARED_REGION_TRACE_DEBUG(
337 ("shared_region: -> lookup(root=%p,cpu=%d,64bit=%d)\n",
338 root_dir, cputype, is_64bit));
339
340 shared_region = NULL;
341 new_shared_region = NULL;
342
343 vm_shared_region_lock();
344 for (;;) {
345 queue_iterate(&vm_shared_region_queue,
346 shared_region,
347 vm_shared_region_t,
348 sr_q) {
349 assert(shared_region->sr_ref_count > 0);
350 if (shared_region->sr_cpu_type == cputype &&
351 shared_region->sr_root_dir == root_dir &&
352 shared_region->sr_64bit == is_64bit) {
353 /* found a match ! */
354 vm_shared_region_reference_locked(shared_region);
355 goto done;
356 }
357 }
358 if (new_shared_region == NULL) {
359 /* no match: create a new one */
360 vm_shared_region_unlock();
361 new_shared_region = vm_shared_region_create(root_dir,
362 cputype,
363 is_64bit);
364 /* do the lookup again, in case we lost a race */
365 vm_shared_region_lock();
366 continue;
367 }
368 /* still no match: use our new one */
369 shared_region = new_shared_region;
370 new_shared_region = NULL;
371 queue_enter(&vm_shared_region_queue,
372 shared_region,
373 vm_shared_region_t,
374 sr_q);
375 break;
376 }
377
378done:
379 vm_shared_region_unlock();
380
381 if (new_shared_region) {
382 /*
383 * We lost a race with someone else to create a new shared
384 * region for that environment. Get rid of our unused one.
385 */
386 assert(new_shared_region->sr_ref_count == 1);
387 new_shared_region->sr_ref_count--;
388 vm_shared_region_destroy(new_shared_region);
389 new_shared_region = NULL;
390 }
391
392 SHARED_REGION_TRACE_DEBUG(
393 ("shared_region: lookup(root=%p,cpu=%d,64bit=%d) <- %p\n",
394 root_dir, cputype, is_64bit, shared_region));
395
396 assert(shared_region->sr_ref_count > 0);
397 return shared_region;
398}
399
400/*
401 * Take an extra reference on a shared region.
402 * The vm_shared_region_lock should already be held by the caller.
403 */
404static void
405vm_shared_region_reference_locked(
406 vm_shared_region_t shared_region)
407{
408#if DEBUG
409 lck_mtx_assert(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);
410#endif
411
412 SHARED_REGION_TRACE_DEBUG(
413 ("shared_region: -> reference_locked(%p)\n",
414 shared_region));
415 assert(shared_region->sr_ref_count > 0);
416 shared_region->sr_ref_count++;
417
418 if (shared_region->sr_timer_call != NULL) {
419 boolean_t cancelled;
420
421 /* cancel and free any pending timeout */
422 cancelled = thread_call_cancel(shared_region->sr_timer_call);
423 if (cancelled) {
424 thread_call_free(shared_region->sr_timer_call);
425 shared_region->sr_timer_call = NULL;
426 /* release the reference held by the cancelled timer */
427 shared_region->sr_ref_count--;
428 } else {
429 /* the timer will drop the reference and free itself */
430 }
431 }
432
433 SHARED_REGION_TRACE_DEBUG(
434 ("shared_region: reference_locked(%p) <- %d\n",
435 shared_region, shared_region->sr_ref_count));
436}
437
438/*
439 * Release a reference on the shared region.
440 * Destroy it if there are no references left.
441 */
442void
443vm_shared_region_deallocate(
444 vm_shared_region_t shared_region)
445{
446 SHARED_REGION_TRACE_DEBUG(
447 ("shared_region: -> deallocate(%p)\n",
448 shared_region));
449
450 vm_shared_region_lock();
451
452 assert(shared_region->sr_ref_count > 0);
453
454 if (shared_region->sr_root_dir == NULL) {
455 /*
456 * Local (i.e. based on the boot volume) shared regions
457 * can persist or not based on the "shared_region_persistence"
458 * sysctl.
459 * Make sure that this one complies.
460 */
461 if (shared_region_persistence &&
462 !shared_region->sr_persists) {
463 /* make this one persistent */
464 shared_region->sr_ref_count++;
465 shared_region->sr_persists = TRUE;
466 } else if (!shared_region_persistence &&
467 shared_region->sr_persists) {
468 /* make this one no longer persistent */
469 assert(shared_region->sr_ref_count > 1);
470 shared_region->sr_ref_count--;
471 shared_region->sr_persists = FALSE;
472 }
473 }
474
475 assert(shared_region->sr_ref_count > 0);
476 shared_region->sr_ref_count--;
477 SHARED_REGION_TRACE_DEBUG(
478 ("shared_region: deallocate(%p): ref now %d\n",
479 shared_region, shared_region->sr_ref_count));
480
481 if (shared_region->sr_ref_count == 0) {
482 uint64_t deadline;
483
484 if (shared_region->sr_timer_call == NULL) {
485 /* hold one reference for the timer */
486 assert(! shared_region->sr_mapping_in_progress);
487 shared_region->sr_ref_count++;
488
489 /* set up the timer */
490 shared_region->sr_timer_call = thread_call_allocate(
491 (thread_call_func_t) vm_shared_region_timeout,
492 (thread_call_param_t) shared_region);
493
494 /* schedule the timer */
495 clock_interval_to_deadline(shared_region_destroy_delay,
496 1000 * 1000 * 1000,
497 &deadline);
498 thread_call_enter_delayed(shared_region->sr_timer_call,
499 deadline);
500
501 SHARED_REGION_TRACE_DEBUG(
502 ("shared_region: deallocate(%p): armed timer\n",
503 shared_region));
504
505 vm_shared_region_unlock();
506 } else {
507 /* timer expired: let go of this shared region */
508
509 /*
510 * Remove it from the queue first, so no one can find
511 * it...
512 */
513 queue_remove(&vm_shared_region_queue,
514 shared_region,
515 vm_shared_region_t,
516 sr_q);
517 vm_shared_region_unlock();
518 /* ... and destroy it */
519 vm_shared_region_destroy(shared_region);
520 shared_region = NULL;
521 }
522 } else {
523 vm_shared_region_unlock();
524 }
525
526 SHARED_REGION_TRACE_DEBUG(
527 ("shared_region: deallocate(%p) <-\n",
528 shared_region));
529}
530
531void
532vm_shared_region_timeout(
533 thread_call_param_t param0,
534 __unused thread_call_param_t param1)
535{
536 vm_shared_region_t shared_region;
537
538 shared_region = (vm_shared_region_t) param0;
539
540 vm_shared_region_deallocate(shared_region);
541}
542
543/*
544 * Create a new (empty) shared region for a new environment.
545 */
546static vm_shared_region_t
547vm_shared_region_create(
548 void *root_dir,
549 cpu_type_t cputype,
550 boolean_t is_64bit)
551{
552 kern_return_t kr;
553 vm_named_entry_t mem_entry;
554 ipc_port_t mem_entry_port;
555 vm_shared_region_t shared_region;
556 vm_map_t sub_map;
557 mach_vm_offset_t base_address, pmap_nesting_start;
558 mach_vm_size_t size, pmap_nesting_size;
559
560 SHARED_REGION_TRACE_DEBUG(
561 ("shared_region: -> create(root=%p,cpu=%d,64bit=%d)\n",
562 root_dir, cputype, is_64bit));
563
564 base_address = 0;
565 size = 0;
566 mem_entry = NULL;
567 mem_entry_port = IPC_PORT_NULL;
568 sub_map = VM_MAP_NULL;
569
570 /* create a new shared region structure... */
571 shared_region = kalloc(sizeof (*shared_region));
572 if (shared_region == NULL) {
573 SHARED_REGION_TRACE_ERROR(
574 ("shared_region: create: couldn't allocate\n"));
575 goto done;
576 }
577
578 /* figure out the correct settings for the desired environment */
579 if (is_64bit) {
580 switch (cputype) {
581 case CPU_TYPE_I386:
582 base_address = SHARED_REGION_BASE_X86_64;
583 size = SHARED_REGION_SIZE_X86_64;
584 pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
585 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
586 break;
587 case CPU_TYPE_POWERPC:
588 base_address = SHARED_REGION_BASE_PPC64;
589 size = SHARED_REGION_SIZE_PPC64;
590 pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
591 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
592 break;
593 default:
594 SHARED_REGION_TRACE_ERROR(
595 ("shared_region: create: unknown cpu type %d\n",
596 cputype));
597 kfree(shared_region, sizeof (*shared_region));
598 shared_region = NULL;
599 goto done;
600 }
601 } else {
602 switch (cputype) {
603 case CPU_TYPE_I386:
604 base_address = SHARED_REGION_BASE_I386;
605 size = SHARED_REGION_SIZE_I386;
606 pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
607 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
608 break;
609 case CPU_TYPE_POWERPC:
610 base_address = SHARED_REGION_BASE_PPC;
611 size = SHARED_REGION_SIZE_PPC;
612 pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
613 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
614 break;
615#ifdef CPU_TYPE_ARM
616 case CPU_TYPE_ARM:
617 base_address = SHARED_REGION_BASE_ARM;
618 size = SHARED_REGION_SIZE_ARM;
619 pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
620 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
621 break;
622#endif /* CPU_TYPE_ARM */
623 default:
624 SHARED_REGION_TRACE_ERROR(
625 ("shared_region: create: unknown cpu type %d\n",
626 cputype));
627 kfree(shared_region, sizeof (*shared_region));
628 shared_region = NULL;
629 goto done;
630
631 }
632 }
633
634 /* create a memory entry structure and a Mach port handle */
635 kr = mach_memory_entry_allocate(&mem_entry,
636 &mem_entry_port);
637 if (kr != KERN_SUCCESS) {
638 kfree(shared_region, sizeof (*shared_region));
639 shared_region = NULL;
640 SHARED_REGION_TRACE_ERROR(
641 ("shared_region: create: "
642 "couldn't allocate mem_entry\n"));
643 goto done;
644 }
645
646 /* create a VM sub map and its pmap */
647 sub_map = vm_map_create(pmap_create(0, is_64bit),
648 0, size,
649 TRUE);
650 if (sub_map == VM_MAP_NULL) {
651 ipc_port_release_send(mem_entry_port);
652 kfree(shared_region, sizeof (*shared_region));
653 shared_region = NULL;
654 SHARED_REGION_TRACE_ERROR(
655 ("shared_region: create: "
656 "couldn't allocate map\n"));
657 goto done;
658 }
659
660 /* make the memory entry point to the VM sub map */
661 mem_entry->is_sub_map = TRUE;
662 mem_entry->backing.map = sub_map;
663 mem_entry->size = size;
664 mem_entry->protection = VM_PROT_ALL;
665
666 /* make the shared region point at the memory entry */
667 shared_region->sr_mem_entry = mem_entry_port;
668
669 /* fill in the shared region's environment and settings */
670 shared_region->sr_base_address = base_address;
671 shared_region->sr_size = size;
672 shared_region->sr_pmap_nesting_start = pmap_nesting_start;
673 shared_region->sr_pmap_nesting_size = pmap_nesting_size;
674 shared_region->sr_cpu_type = cputype;
675 shared_region->sr_64bit = is_64bit;
676 shared_region->sr_root_dir = root_dir;
677
678 queue_init(&shared_region->sr_q);
679 shared_region->sr_mapping_in_progress = FALSE;
680 shared_region->sr_persists = FALSE;
681 shared_region->sr_timer_call = NULL;
682 shared_region->sr_first_mapping = (mach_vm_offset_t) -1;
683
684 /* grab a reference for the caller */
685 shared_region->sr_ref_count = 1;
686
687done:
688 if (shared_region) {
689 SHARED_REGION_TRACE_INFO(
690 ("shared_region: create(root=%p,cpu=%d,64bit=%d,"
691 "base=0x%llx,size=0x%llx) <- "
692 "%p mem=(%p,%p) map=%p pmap=%p\n",
693 root_dir, cputype, is_64bit, (long long)base_address,
694 (long long)size, shared_region,
695 mem_entry_port, mem_entry, sub_map, sub_map->pmap));
696 } else {
697 SHARED_REGION_TRACE_INFO(
698 ("shared_region: create(root=%p,cpu=%d,64bit=%d,"
699 "base=0x%llx,size=0x%llx) <- NULL",
700 root_dir, cputype, is_64bit, (long long)base_address,
701 (long long)size));
702 }
703 return shared_region;
704}
705
706/*
707 * Destroy a now-unused shared region.
708 * The shared region is no longer in the queue and cannot be looked up.
709 */
710static void
711vm_shared_region_destroy(
712 vm_shared_region_t shared_region)
713{
714 vm_named_entry_t mem_entry;
715 vm_map_t map;
716
717 SHARED_REGION_TRACE_INFO(
718 ("shared_region: -> destroy(%p) (root=%p,cpu=%d,64bit=%d)\n",
719 shared_region,
720 shared_region->sr_root_dir,
721 shared_region->sr_cpu_type,
722 shared_region->sr_64bit));
723
724 assert(shared_region->sr_ref_count == 0);
725 assert(!shared_region->sr_persists);
726
727 mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
728 assert(mem_entry->is_sub_map);
729 assert(!mem_entry->internal);
730 assert(!mem_entry->is_pager);
731 map = mem_entry->backing.map;
732
733 /*
734 * Clean up the pmap first. The virtual addresses that were
735 * entered in this possibly "nested" pmap may have different values
736 * than the VM map's min and max offsets, if the VM sub map was
737 * mapped at a non-zero offset in the processes' main VM maps, which
738 * is usually the case, so the clean-up we do in vm_map_destroy() would
739 * not be enough.
740 */
741 if (map->pmap) {
742 pmap_remove(map->pmap,
743 shared_region->sr_base_address,
744 (shared_region->sr_base_address +
745 shared_region->sr_size));
746 }
747
748 /*
749 * Release our (one and only) handle on the memory entry.
750 * This will generate a no-senders notification, which will be processed
751 * by ipc_kobject_notify(), which will release the one and only
752 * reference on the memory entry and cause it to be destroyed, along
753 * with the VM sub map and its pmap.
754 */
755 mach_memory_entry_port_release(shared_region->sr_mem_entry);
756 mem_entry = NULL;
757 shared_region->sr_mem_entry = IPC_PORT_NULL;
758
759 if (shared_region->sr_timer_call) {
760 thread_call_free(shared_region->sr_timer_call);
761 }
762
763 /* release the shared region structure... */
764 kfree(shared_region, sizeof (*shared_region));
765 SHARED_REGION_TRACE_DEBUG(
766 ("shared_region: destroy(%p) <-\n",
767 shared_region));
768 shared_region = NULL;
769
770}
771
772/*
773 * Gets the address of the first (in time) mapping in the shared region.
774 */
775kern_return_t
776vm_shared_region_start_address(
777 vm_shared_region_t shared_region,
778 mach_vm_offset_t *start_address)
779{
780 kern_return_t kr;
781 mach_vm_offset_t sr_base_address;
782 mach_vm_offset_t sr_first_mapping;
783
784 SHARED_REGION_TRACE_DEBUG(
785 ("shared_region: -> start_address(%p)\n",
786 shared_region));
787 assert(shared_region->sr_ref_count > 1);
788
789 vm_shared_region_lock();
790
791 /*
792 * Wait if there's another thread establishing a mapping
793 * in this shared region right when we're looking at it.
794 * We want a consistent view of the map...
795 */
796 while (shared_region->sr_mapping_in_progress) {
797 /* wait for our turn... */
798 assert(shared_region->sr_ref_count > 1);
799 vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
800 THREAD_UNINT);
801 }
802 assert(! shared_region->sr_mapping_in_progress);
803 assert(shared_region->sr_ref_count > 1);
804
805 sr_base_address = shared_region->sr_base_address;
806 sr_first_mapping = shared_region->sr_first_mapping;
807
808 if (sr_first_mapping == (mach_vm_offset_t) -1) {
809 /* shared region is empty */
810 kr = KERN_INVALID_ADDRESS;
811 } else {
812 kr = KERN_SUCCESS;
813 *start_address = sr_base_address + sr_first_mapping;
814 }
815
816 vm_shared_region_unlock();
817
818 SHARED_REGION_TRACE_DEBUG(
819 ("shared_region: start_address(%p) <- 0x%llx\n",
820 shared_region, (long long)shared_region->sr_base_address));
821
822 return kr;
823}
824/*
825 * Establish some mappings of a file in the shared region.
826 * This is used by "dyld" via the shared_region_map_np() system call
827 * to populate the shared region with the appropriate shared cache.
828 *
829 * One could also call it several times to incrementally load several
830 * libraries, as long as they do not overlap.
831 * It will return KERN_SUCCESS if the mappings were successfully established
832 * or if they were already established identically by another process.
833 */
834kern_return_t
835vm_shared_region_map_file(
836 vm_shared_region_t shared_region,
837 unsigned int mappings_count,
838 struct shared_file_mapping_np *mappings,
839 memory_object_control_t file_control,
840 memory_object_size_t file_size,
841 void *root_dir)
842{
843 kern_return_t kr;
844 vm_object_t file_object;
845 ipc_port_t sr_handle;
846 vm_named_entry_t sr_mem_entry;
847 vm_map_t sr_map;
848 mach_vm_offset_t sr_base_address;
849 unsigned int i;
850 mach_port_t map_port;
851 mach_vm_offset_t target_address;
852 vm_object_t object;
853 vm_object_size_t obj_size;
854
855
856 kr = KERN_SUCCESS;
857
858 vm_shared_region_lock();
859 assert(shared_region->sr_ref_count > 1);
860
861 if (shared_region->sr_root_dir != root_dir) {
862 /*
863 * This shared region doesn't match the current root
864 * directory of this process. Deny the mapping to
865 * avoid tainting the shared region with something that
866 * doesn't quite belong in it.
867 */
868 vm_shared_region_unlock();
869 kr = KERN_PROTECTION_FAILURE;
870 goto done;
871 }
872
873 /*
874 * Make sure we handle only one mapping at a time in a given
875 * shared region, to avoid race conditions. This should not
876 * happen frequently...
877 */
878 while (shared_region->sr_mapping_in_progress) {
879 /* wait for our turn... */
880 vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
881 THREAD_UNINT);
882 }
883 assert(! shared_region->sr_mapping_in_progress);
884 assert(shared_region->sr_ref_count > 1);
885 /* let others know we're working in this shared region */
886 shared_region->sr_mapping_in_progress = TRUE;
887
888 vm_shared_region_unlock();
889
890 /* no need to lock because this data is never modified... */
891 sr_handle = shared_region->sr_mem_entry;
892 sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
893 sr_map = sr_mem_entry->backing.map;
894 sr_base_address = shared_region->sr_base_address;
895
896 SHARED_REGION_TRACE_DEBUG(
897 ("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
898 shared_region, mappings_count, mappings,
899 file_control, file_size));
900
901 /* get the VM object associated with the file to be mapped */
902 file_object = memory_object_control_to_vm_object(file_control);
903
904 /* establish the mappings */
905 for (i = 0; i < mappings_count; i++) {
906 SHARED_REGION_TRACE_INFO(
907 ("shared_region: mapping[%d]: "
908 "address:0x%016llx size:0x%016llx offset:0x%016llx "
909 "maxprot:0x%x prot:0x%x\n",
910 i,
911 (long long)mappings[i].sfm_address,
912 (long long)mappings[i].sfm_size,
913 (long long)mappings[i].sfm_file_offset,
914 mappings[i].sfm_max_prot,
915 mappings[i].sfm_init_prot));
916
917 if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
918 /* zero-filled memory */
919 map_port = MACH_PORT_NULL;
920 } else {
921 /* file-backed memory */
922 map_port = (ipc_port_t) file_object->pager;
923 }
924
925 /* mapping's address is relative to the shared region base */
926 target_address =
927 mappings[i].sfm_address - sr_base_address;
928
929 /* establish that mapping, OK if it's "already" there */
930 if (map_port == MACH_PORT_NULL) {
931 /*
932 * We want to map some anonymous memory in a
933 * shared region.
934 * We have to create the VM object now, so that it
935 * can be mapped "copy-on-write".
936 */
937 obj_size = vm_map_round_page(mappings[i].sfm_size);
938 object = vm_object_allocate(obj_size);
939 if (object == VM_OBJECT_NULL) {
940 kr = KERN_RESOURCE_SHORTAGE;
941 } else {
942 kr = vm_map_enter(
943 sr_map,
944 &target_address,
945 vm_map_round_page(mappings[i].sfm_size),
946 0,
947 VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
948 object,
949 0,
950 TRUE,
951 mappings[i].sfm_init_prot & VM_PROT_ALL,
952 mappings[i].sfm_max_prot & VM_PROT_ALL,
953 VM_INHERIT_DEFAULT);
954 }
955 } else {
956 object = VM_OBJECT_NULL; /* no anonymous memory here */
957 kr = vm_map_enter_mem_object(
958 sr_map,
959 &target_address,
960 vm_map_round_page(mappings[i].sfm_size),
961 0,
962 VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
963 map_port,
964 mappings[i].sfm_file_offset,
965 TRUE,
966 mappings[i].sfm_init_prot & VM_PROT_ALL,
967 mappings[i].sfm_max_prot & VM_PROT_ALL,
968 VM_INHERIT_DEFAULT);
969 }
970
971 if (kr != KERN_SUCCESS) {
972 if (map_port == MACH_PORT_NULL) {
973 /*
974 * Get rid of the VM object we just created
975 * but failed to map.
976 */
977 vm_object_deallocate(object);
978 object = VM_OBJECT_NULL;
979 }
980 if (kr == KERN_MEMORY_PRESENT) {
981 /*
982 * This exact mapping was already there:
983 * that's fine.
984 */
985 SHARED_REGION_TRACE_INFO(
986 ("shared_region: mapping[%d]: "
987 "address:0x%016llx size:0x%016llx "
988 "offset:0x%016llx "
989 "maxprot:0x%x prot:0x%x "
990 "already mapped...\n",
991 i,
992 (long long)mappings[i].sfm_address,
993 (long long)mappings[i].sfm_size,
994 (long long)mappings[i].sfm_file_offset,
995 mappings[i].sfm_max_prot,
996 mappings[i].sfm_init_prot));
997 /*
998 * We didn't establish this mapping ourselves;
999 * let's reset its size, so that we do not
1000 * attempt to undo it if an error occurs later.
1001 */
1002 mappings[i].sfm_size = 0;
1003 kr = KERN_SUCCESS;
1004 } else {
1005 unsigned int j;
1006
1007 /* this mapping failed ! */
1008 SHARED_REGION_TRACE_ERROR(
1009 ("shared_region: mapping[%d]: "
1010 "address:0x%016llx size:0x%016llx "
1011 "offset:0x%016llx "
1012 "maxprot:0x%x prot:0x%x failed 0x%x\n",
1013 i,
1014 (long long)mappings[i].sfm_address,
1015 (long long)mappings[i].sfm_size,
1016 (long long)mappings[i].sfm_file_offset,
1017 mappings[i].sfm_max_prot,
1018 mappings[i].sfm_init_prot,
1019 kr));
1020
1021 /*
1022 * Undo the mappings we've established so far.
1023 */
1024 for (j = 0; j < i; j++) {
1025 kern_return_t kr2;
1026
1027 if (mappings[j].sfm_size == 0) {
1028 /*
1029 * We didn't establish this
1030 * mapping, so nothing to undo.
1031 */
1032 continue;
1033 }
1034 SHARED_REGION_TRACE_INFO(
1035 ("shared_region: mapping[%d]: "
1036 "address:0x%016llx "
1037 "size:0x%016llx "
1038 "offset:0x%016llx "
1039 "maxprot:0x%x prot:0x%x: "
1040 "undoing...\n",
1041 j,
1042 (long long)mappings[j].sfm_address,
1043 (long long)mappings[j].sfm_size,
1044 (long long)mappings[j].sfm_file_offset,
1045 mappings[j].sfm_max_prot,
1046 mappings[j].sfm_init_prot));
1047 kr2 = mach_vm_deallocate(
1048 sr_map,
1049 (mappings[j].sfm_address -
1050 sr_base_address),
1051 mappings[j].sfm_size);
1052 assert(kr2 == KERN_SUCCESS);
1053 }
1054
1055 break;
1056 }
1057
1058 }
1059
1060 /*
1061 * Record the first (chronologically) mapping in
1062 * this shared region.
1063 * We're protected by "sr_mapping_in_progress" here,
1064 * so no need to lock "shared_region".
1065 */
1066 if (shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
1067 shared_region->sr_first_mapping = target_address;
1068 }
1069 }
1070
1071 vm_shared_region_lock();
1072 assert(shared_region->sr_ref_count > 1);
1073 assert(shared_region->sr_mapping_in_progress);
1074 /* we're done working on that shared region */
1075 shared_region->sr_mapping_in_progress = FALSE;
1076 thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
1077 vm_shared_region_unlock();
1078
1079done:
1080 SHARED_REGION_TRACE_DEBUG(
1081 ("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n",
1082 shared_region, mappings_count, mappings,
1083 file_control, file_size, kr));
1084 return kr;
1085}
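/*
 * Illustrative sketch only (not compiled): how a user-space caller such
 * as "dyld" might describe two mappings to shared_region_map_np(), which
 * funnels into vm_shared_region_map_file() above. The addresses, sizes
 * and file descriptor (sr_base, cache_fd) are made up for the example.
 *
 *	struct shared_file_mapping_np mappings[2] = {
 *		{ .sfm_address     = sr_base + 0x0,		// file-backed text
 *		  .sfm_size        = 0x200000,
 *		  .sfm_file_offset = 0x0,
 *		  .sfm_max_prot    = VM_PROT_READ | VM_PROT_EXECUTE,
 *		  .sfm_init_prot   = VM_PROT_READ | VM_PROT_EXECUTE },
 *		{ .sfm_address     = sr_base + 0x200000,	// zero-filled data
 *		  .sfm_size        = 0x1000,
 *		  .sfm_file_offset = 0x0,
 *		  .sfm_max_prot    = VM_PROT_READ | VM_PROT_WRITE,
 *		  .sfm_init_prot   = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_ZF },
 *	};
 *	shared_region_map_np(cache_fd, 2, mappings);
 *
 * Each sfm_address is an absolute address within the shared region; the
 * routine above subtracts sr_base_address to get the offset in the submap.
 */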
1086
1087/*
1088 * Enter the appropriate shared region into "map" for "task".
1089 * This involves looking up the shared region (and possibly creating a new
1090 * one) for the desired environment, then mapping the VM sub map into the
1091 * task's VM "map", with the appropriate level of pmap-nesting.
1092 */
1093kern_return_t
1094vm_shared_region_enter(
1095 struct _vm_map *map,
1096 struct task *task,
1097 void *fsroot,
1098 cpu_type_t cpu)
1099{
1100 kern_return_t kr;
1101 vm_shared_region_t shared_region;
1102 vm_map_offset_t sr_address, sr_offset, target_address;
1103 vm_map_size_t sr_size, mapping_size;
1104 vm_map_offset_t sr_pmap_nesting_start;
1105 vm_map_size_t sr_pmap_nesting_size;
1106 ipc_port_t sr_handle;
1107 boolean_t is_64bit;
1108
1109 is_64bit = task_has_64BitAddr(task);
1110
1111 SHARED_REGION_TRACE_DEBUG(
1112 ("shared_region: -> "
1113 "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d)\n",
1114 map, task, fsroot, cpu, is_64bit));
1115
1116 /* lookup (create if needed) the shared region for this environment */
1117 shared_region = vm_shared_region_lookup(fsroot, cpu, is_64bit);
1118 if (shared_region == NULL) {
1119 /* this should not happen ! */
1120 SHARED_REGION_TRACE_ERROR(
1121 ("shared_region: -> "
1122 "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d): "
1123 "lookup failed !\n",
1124 map, task, fsroot, cpu, is_64bit));
1125 //panic("shared_region_enter: lookup failed\n");
1126 return KERN_FAILURE;
1127 }
1128
1129 /* let the task use that shared region */
1130 vm_shared_region_set(task, shared_region);
1131
1132 kr = KERN_SUCCESS;
1133 /* no need to lock since this data is never modified */
1134 sr_address = shared_region->sr_base_address;
1135 sr_size = shared_region->sr_size;
1136 sr_handle = shared_region->sr_mem_entry;
1137 sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start;
1138 sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size;
1139
1140 /*
1141 * Start mapping the shared region's VM sub map into the task's VM map.
1142 */
1143 sr_offset = 0;
1144
1145 if (sr_pmap_nesting_start > sr_address) {
1146 /* we need to map a range without pmap-nesting first */
1147 target_address = sr_address;
1148 mapping_size = sr_pmap_nesting_start - sr_address;
1149 kr = vm_map_enter_mem_object(
1150 map,
1151 &target_address,
1152 mapping_size,
1153 0,
1154 VM_FLAGS_FIXED,
1155 sr_handle,
1156 sr_offset,
1157 TRUE,
1158 VM_PROT_READ,
1159 VM_PROT_ALL,
1160 VM_INHERIT_SHARE);
1161 if (kr != KERN_SUCCESS) {
1162 SHARED_REGION_TRACE_ERROR(
1163 ("shared_region: enter(%p,%p,%p,%d,%d): "
1164 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1165 map, task, fsroot, cpu, is_64bit,
1166 (long long)target_address,
1167 (long long)mapping_size, sr_handle, kr));
1168 goto done;
1169 }
1170 SHARED_REGION_TRACE_DEBUG(
1171 ("shared_region: enter(%p,%p,%p,%d,%d): "
1172 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1173 map, task, fsroot, cpu, is_64bit,
1174 (long long)target_address, (long long)mapping_size,
1175 sr_handle, kr));
1176 sr_offset += mapping_size;
1177 sr_size -= mapping_size;
1178 }
1179 /*
1180 * We may need to map several pmap-nested portions, due to platform
1181 * specific restrictions on pmap nesting.
1182 * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias...
1183 */
1184 for (;
1185 sr_pmap_nesting_size > 0;
1186 sr_offset += mapping_size,
1187 sr_size -= mapping_size,
1188 sr_pmap_nesting_size -= mapping_size) {
1189 target_address = sr_address + sr_offset;
1190 mapping_size = sr_pmap_nesting_size;
1191 if (mapping_size > pmap_nesting_size_max) {
1192 mapping_size = (vm_map_offset_t) pmap_nesting_size_max;
1193 }
1194 kr = vm_map_enter_mem_object(
1195 map,
1196 &target_address,
1197 mapping_size,
1198 0,
1199 (VM_FLAGS_FIXED | VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP)),
1200 sr_handle,
1201 sr_offset,
1202 TRUE,
1203 VM_PROT_READ,
1204 VM_PROT_ALL,
1205 VM_INHERIT_SHARE);
1206 if (kr != KERN_SUCCESS) {
1207 SHARED_REGION_TRACE_ERROR(
1208 ("shared_region: enter(%p,%p,%p,%d,%d): "
1209 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1210 map, task, fsroot, cpu, is_64bit,
1211 (long long)target_address,
1212 (long long)mapping_size, sr_handle, kr));
1213 goto done;
1214 }
1215 SHARED_REGION_TRACE_DEBUG(
1216 ("shared_region: enter(%p,%p,%p,%d,%d): "
1217 "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1218 map, task, fsroot, cpu, is_64bit,
1219 (long long)target_address, (long long)mapping_size,
1220 sr_handle, kr));
1221 }
1222 if (sr_size > 0) {
1223 /* and there's some left to be mapped without pmap-nesting */
1224 target_address = sr_address + sr_offset;
1225 mapping_size = sr_size;
1226 kr = vm_map_enter_mem_object(
1227 map,
1228 &target_address,
1229 mapping_size,
1230 0,
1231 VM_FLAGS_FIXED,
1232 sr_handle,
1233 sr_offset,
1234 TRUE,
1235 VM_PROT_READ,
1236 VM_PROT_ALL,
1237 VM_INHERIT_SHARE);
1238 if (kr != KERN_SUCCESS) {
1239 SHARED_REGION_TRACE_ERROR(
1240 ("shared_region: enter(%p,%p,%p,%d,%d): "
1241 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1242 map, task, fsroot, cpu, is_64bit,
1243 (long long)target_address,
1244 (long long)mapping_size, sr_handle, kr));
1245 goto done;
1246 }
1247 SHARED_REGION_TRACE_DEBUG(
1248 ("shared_region: enter(%p,%p,%p,%d,%d): "
1249 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1250 map, task, fsroot, cpu, is_64bit,
1251 (long long)target_address, (long long)mapping_size,
1252 sr_handle, kr));
1253 sr_offset += mapping_size;
1254 sr_size -= mapping_size;
1255 }
1256 assert(sr_size == 0);
1257
1258done:
1259 SHARED_REGION_TRACE_DEBUG(
1260 ("shared_region: enter(%p,%p,%p,%d,%d) <- 0x%x\n",
1261 map, task, fsroot, cpu, is_64bit, kr));
1262 return kr;
1263}
1264
1265/******************************************************************************/
1266/* Comm page support */
1267/******************************************************************************/
1268
1269ipc_port_t commpage32_handle = IPC_PORT_NULL;
1270ipc_port_t commpage64_handle = IPC_PORT_NULL;
1271vm_named_entry_t commpage32_entry = NULL;
1272vm_named_entry_t commpage64_entry = NULL;
1273vm_map_t commpage32_map = VM_MAP_NULL;
1274vm_map_t commpage64_map = VM_MAP_NULL;
1275
1276/*
1277 * Create a memory entry, VM submap and pmap for one commpage.
1278 */
1279static void
1280_vm_commpage_init(
1281 ipc_port_t *handlep,
1282 vm_map_size_t size)
1283{
1284 kern_return_t kr;
1285 vm_named_entry_t mem_entry;
1286 vm_map_t new_map;
1287
1288 SHARED_REGION_TRACE_DEBUG(
1289 ("commpage: -> _init(0x%llx)\n",
1290 (long long)size));
1291
1292 kr = mach_memory_entry_allocate(&mem_entry,
1293 handlep);
1294 if (kr != KERN_SUCCESS) {
1295 panic("_vm_commpage_init: could not allocate mem_entry");
1296 }
1297 new_map = vm_map_create(pmap_create(0, FALSE), 0, size, TRUE);
1298 if (new_map == VM_MAP_NULL) {
1299 panic("_vm_commpage_init: could not allocate VM map");
1300 }
1301 mem_entry->backing.map = new_map;
1302 mem_entry->internal = TRUE;
1303 mem_entry->is_sub_map = TRUE;
1304 mem_entry->offset = 0;
1305 mem_entry->protection = VM_PROT_ALL;
1306 mem_entry->size = size;
1307
1308 SHARED_REGION_TRACE_DEBUG(
1309 ("commpage: _init(0x%llx) <- %p\n",
1310 (long long)size, *handlep));
1311}
1312
1313/*
1314 * Initialize the comm pages at boot time.
1315 */
1316void
1317vm_commpage_init(void)
1318{
1319 SHARED_REGION_TRACE_DEBUG(
1320 ("commpage: -> init()\n"));
1321
1322 /* create the 32-bit comm page */
1323 _vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
1324 commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
1325 commpage32_map = commpage32_entry->backing.map;
1326
1327 /* XXX if (cpu_is_64bit_capable()) ? */
1328 /* create the 64-bit comm page */
1329 _vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
1330 commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
1331 commpage64_map = commpage64_entry->backing.map;
1332
1333 /* populate them according to this specific platform */
1334 commpage_populate();
1335 __commpage_setup = 1;
1336#if defined(__i386__) || defined(__x86_64__)
1337 if (__system_power_source == 0) {
1338 post_sys_powersource_internal(0, 1);
1339 }
1340#endif /* __i386__ || __x86_64__ */
1341
1342 SHARED_REGION_TRACE_DEBUG(
1343 ("commpage: init() <-\n"));
1344}
1345
1346/*
1347 * Enter the appropriate comm page into the task's address space.
1348 * This is called at exec() time via vm_map_exec().
1349 */
1350kern_return_t
1351vm_commpage_enter(
1352 vm_map_t map,
1353 task_t task)
1354{
1355 ipc_port_t commpage_handle;
1356 vm_map_offset_t commpage_address, objc_address;
1357 vm_map_size_t commpage_size, objc_size;
1358 int vm_flags;
1359 kern_return_t kr;
1360
1361 SHARED_REGION_TRACE_DEBUG(
1362 ("commpage: -> enter(%p,%p)\n",
1363 map, task));
1364
1365 /* the comm page is likely to be beyond the actual end of the VM map */
1366 vm_flags = VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX;
1367
1368 /* select the appropriate comm page for this task */
1369 assert(! (task_has_64BitAddr(task) ^ vm_map_is_64bit(map)));
1370 if (task_has_64BitAddr(task)) {
1371#ifdef __ppc__
1372 /*
1373 * PPC51: ppc64 is limited to 51-bit addresses.
1374 * Memory above that limit is handled specially at the
1375 * pmap level, so do not interfere.
1376 */
1377 vm_flags |= VM_FLAGS_NO_PMAP_CHECK;
1378#endif /* __ppc__ */
1379 commpage_handle = commpage64_handle;
1380 commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
1381 commpage_size = _COMM_PAGE64_AREA_LENGTH;
1382 objc_size = _COMM_PAGE64_OBJC_SIZE;
1383 objc_address = _COMM_PAGE64_OBJC_BASE;
1384 } else {
1385 commpage_handle = commpage32_handle;
1386 commpage_address =
1387 (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
1388 commpage_size = _COMM_PAGE32_AREA_LENGTH;
1389 objc_size = _COMM_PAGE32_OBJC_SIZE;
1390 objc_address = _COMM_PAGE32_OBJC_BASE;
1391 }
1392
1393 if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
1394 (commpage_size & (pmap_nesting_size_min - 1)) == 0) {
1395 /* the commpage is properly aligned or sized for pmap-nesting */
1396 vm_flags |= VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP);
1397 }
1398
1399 /* map the comm page in the task's address space */
1400 assert(commpage_handle != IPC_PORT_NULL);
1401 kr = vm_map_enter_mem_object(
1402 map,
1403 &commpage_address,
1404 commpage_size,
1405 0,
1406 vm_flags,
1407 commpage_handle,
1408 0,
1409 FALSE,
1410 VM_PROT_READ|VM_PROT_EXECUTE,
1411 VM_PROT_READ|VM_PROT_EXECUTE,
1412 VM_INHERIT_SHARE);
1413 if (kr != KERN_SUCCESS) {
1414 SHARED_REGION_TRACE_ERROR(
1415 ("commpage: enter(%p,0x%llx,0x%llx) "
1416 "commpage %p mapping failed 0x%x\n",
1417 map, (long long)commpage_address,
1418 (long long)commpage_size, commpage_handle, kr));
1419 }
1420
1421 /*
1422 * Since we're here, we also pre-allocate some virtual space for the
1423 * Objective-C run-time, if needed...
1424 */
1425 if (objc_size != 0) {
1426 kr = vm_map_enter_mem_object(
1427 map,
1428 &objc_address,
1429 objc_size,
1430 0,
1431 VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX,
1432 IPC_PORT_NULL,
1433 0,
1434 FALSE,
1435 VM_PROT_ALL,
1436 VM_PROT_ALL,
1437 VM_INHERIT_DEFAULT);
1438 if (kr != KERN_SUCCESS) {
1439 SHARED_REGION_TRACE_ERROR(
1440 ("commpage: enter(%p,0x%llx,0x%llx) "
1441 "objc mapping failed 0x%x\n",
1442 map, (long long)objc_address,
1443 (long long)objc_size, kr));
1444 }
1445 }
1446
1447 SHARED_REGION_TRACE_DEBUG(
1448 ("commpage: enter(%p,%p) <- 0x%x\n",
1449 map, task, kr));
1450 return kr;
1451}
1452
1453
1454/*
1455 * This is called from power management code to let the kernel know the current source of power:
1456 * 0 if it is an external source (connected to power),
1457 * 1 if it is an internal power source, i.e. the battery.
1458 */
1459void
1460#if defined(__i386__) || defined(__x86_64__)
1461post_sys_powersource(int i)
1462#else
1463post_sys_powersource(__unused int i)
1464#endif
1465{
1466#if defined(__i386__) || defined(__x86_64__)
1467 post_sys_powersource_internal(i, 0);
1468#endif /* __i386__ || __x86_64__ */
1469}
1470
1471
1472#if defined(__i386__) || defined(__x86_64__)
1473static void
1474post_sys_powersource_internal(int i, int internal)
1475{
1476 if (internal == 0)
1477 __system_power_source = i;
1478
1479 if (__commpage_setup != 0) {
1480 if (__system_power_source != 0)
1481 commpage_set_spin_count(0);
1482 else
1483 commpage_set_spin_count(MP_SPIN_TRIES);
1484 }
1485}
1486#endif /* __i386__ || __x86_64__ */