/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Shared region (... and comm page)
 *
 * This file handles the VM shared region and comm page.
 *
 *
 * A shared region is a submap that contains the most common system shared
 * libraries for a given environment.
 * An environment is defined by (cpu-type, 64-bitness, root directory).
 *
 * The point of a shared region is to reduce the setup overhead when exec'ing
 * a new process.
 * A shared region uses a shared VM submap that gets mapped automatically
 * at exec() time (see vm_map_exec()).  The first process of a given
 * environment sets up the shared region and all further processes in that
 * environment can re-use that shared region without having to re-create
 * the same mappings in their VM map.  All they need is contained in the shared
 * region.
 * It can also share a pmap (mostly for read-only parts but also for the
 * initial version of some writable parts), which gets "nested" into the
 * process's pmap.  This reduces the number of soft faults: once one process
 * brings in a page in the shared region, all the other processes can access
 * it without having to enter it in their own pmap.
 *
 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
 * to map the appropriate shared region in the process's address space.
 * We look up the appropriate shared region for the process's environment.
 * If we can't find one, we create a new (empty) one and add it to the list.
 * Otherwise, we just take an extra reference on the shared region we found.
 *
 * The "dyld" runtime (mapped into the process's address space at exec() time)
 * will then use the shared_region_check_np() and shared_region_map_np()
 * system calls to validate and/or populate the shared region with the
 * appropriate dyld_shared_cache file.
 *
 * The shared region is inherited on fork() and the child simply takes an
 * extra reference on its parent's shared region.
 *
 * When the task terminates, we release a reference on its shared region.
 * When the last reference is released, we destroy the shared region.
 *
 * After a chroot(), the calling process keeps using its original shared region,
 * since that's what was mapped when it was started.  But its children
 * will use a different shared region, because they need to use the shared
 * cache that's relative to the new root directory.
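 *
 * Illustration (added for clarity, not part of the original sources): the
 * sfm_* fields below are the ones vm_shared_region_map_file() consumes;
 * "cache_fd" and "cache_text_size" are hypothetical placeholders.  A
 * dyld-like caller would describe one region of the shared cache file
 * roughly like this before handing an array of such mappings to the kernel:
 *
 *	struct shared_file_mapping_np m;
 *	m.sfm_address     = SHARED_REGION_BASE_X86_64;       // target address
 *	m.sfm_size        = cache_text_size;                 // bytes to map
 *	m.sfm_file_offset = 0;                               // offset in the cache file
 *	m.sfm_max_prot    = VM_PROT_READ | VM_PROT_EXECUTE;
 *	m.sfm_init_prot   = VM_PROT_READ | VM_PROT_EXECUTE;
 *	// a shared_region_map_np()-style call with (cache_fd, 1, &m)
 *	// then reaches vm_shared_region_map_file() below.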
78 * A "comm page" is an area of memory that is populated by the kernel with
79 * the appropriate platform-specific version of some commonly used code.
80 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
81 * for the native cpu-type. No need to overly optimize translated code
82 * for hardware that is not really there !
84 * The comm pages are created and populated at boot time.
86 * The appropriate comm page is mapped into a process's address space
87 * at exec() time, in vm_map_exec().
88 * It is then inherited on fork().
90 * The comm page is shared between the kernel and all applications of
91 * a given platform. Only the kernel can modify it.
93 * Applications just branch to fixed addresses in the comm page and find
94 * the right version of the code for the platform. There is also some
95 * data provided and updated by the kernel for processes to retrieve easily
96 * without having to do a system call.
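/*
 * Illustrative sketch (added, not from the original sources): user space
 * reads comm page data with a plain load from a fixed address, no system
 * call needed.  Assuming the _COMM_PAGE_CPU_CAPABILITIES constant from
 * <machine/cpu_capabilities.h> on i386/x86_64, a hypothetical reader would
 * look like:
 *
 *	static uint32_t
 *	read_cpu_capabilities(void)
 *	{
 *		// the kernel keeps this word up to date in the comm page
 *		return *(volatile uint32_t *) _COMM_PAGE_CPU_CAPABILITIES;
 *	}
 */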
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>

#include <vm/vm_protos.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>
115 /* "dyld" uses this to figure out what the kernel supports */
116 int shared_region_version
= 3;
118 /* trace level, output is sent to the system log file */
119 int shared_region_trace_level
= SHARED_REGION_TRACE_ERROR_LVL
;
121 /* should local (non-chroot) shared regions persist when no task uses them ? */
122 int shared_region_persistence
= 0; /* no by default */
124 /* delay before reclaiming an unused shared region */
125 int shared_region_destroy_delay
= 120; /* in seconds */
128 * Only one cache gets to slide on Desktop, since we can't
129 * tear down slide info properly today and the desktop actually
130 * produces lots of shared caches.
132 boolean_t shared_region_completed_slide
= FALSE
;
134 /* this lock protects all the shared region data structures */
135 lck_grp_t
*vm_shared_region_lck_grp
;
136 lck_mtx_t vm_shared_region_lock
;
138 #define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
139 #define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
140 #define vm_shared_region_sleep(event, interruptible) \
141 lck_mtx_sleep(&vm_shared_region_lock, \
146 /* the list of currently available shared regions (one per environment) */
147 queue_head_t vm_shared_region_queue
;
149 static void vm_shared_region_reference_locked(vm_shared_region_t shared_region
);
150 static vm_shared_region_t
vm_shared_region_create(
154 static void vm_shared_region_destroy(vm_shared_region_t shared_region
);
156 static void vm_shared_region_timeout(thread_call_param_t param0
,
157 thread_call_param_t param1
);
159 static int __commpage_setup
= 0;
160 #if defined(__i386__) || defined(__x86_64__)
161 static int __system_power_source
= 1; /* init to extrnal power source */
162 static void post_sys_powersource_internal(int i
, int internal
);
163 #endif /* __i386__ || __x86_64__ */
/*
 * Initialize the module...
 */
void
vm_shared_region_init(void)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> init\n"));

    vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region",
                                                  LCK_GRP_ATTR_NULL);
    lck_mtx_init(&vm_shared_region_lock,
                 vm_shared_region_lck_grp,
                 LCK_ATTR_NULL);

    queue_init(&vm_shared_region_queue);

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: <- init\n"));
}
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 */
vm_shared_region_t
vm_shared_region_get(
    task_t	task)
{
    vm_shared_region_t	shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> get(%p)\n",
         (void *)VM_KERNEL_ADDRPERM(task)));

    vm_shared_region_lock();
    shared_region = task->shared_region;
    assert(shared_region->sr_ref_count > 0);
    vm_shared_region_reference_locked(shared_region);
    vm_shared_region_unlock();

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: get(%p) <- %p\n",
         (void *)VM_KERNEL_ADDRPERM(task),
         (void *)VM_KERNEL_ADDRPERM(shared_region)));

    return shared_region;
}
/*
 * Get the base address of the shared region.
 * That's the address at which it needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_offset_t
vm_shared_region_base_address(
    vm_shared_region_t	shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> base_address(%p)\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: base_address(%p) <- 0x%llx\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region),
         (long long)shared_region->sr_base_address));
    return shared_region->sr_base_address;
}
/*
 * Get the size of the shared region.
 * That's the size that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_size_t
vm_shared_region_size(
    vm_shared_region_t	shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> size(%p)\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: size(%p) <- 0x%llx\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region),
         (long long)shared_region->sr_size));
    return shared_region->sr_size;
}
/*
 * Get the memory entry of the shared region.
 * That's the "memory object" that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
ipc_port_t
vm_shared_region_mem_entry(
    vm_shared_region_t	shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> mem_entry(%p)\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: mem_entry(%p) <- %p\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region),
         (void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry)));
    return shared_region->sr_mem_entry;
}
uint32_t
vm_shared_region_get_slide(
    vm_shared_region_t	shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> vm_shared_region_get_slide(%p)\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: vm_shared_region_get_slide(%p) <- %u\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region),
         shared_region->sr_slide_info.slide));

    /* 0 if we haven't slid */
    assert(shared_region->sr_slide_info.slide_object != NULL ||
           shared_region->sr_slide_info.slide == 0);

    return shared_region->sr_slide_info.slide;
}
vm_shared_region_slide_info_t
vm_shared_region_get_slide_info(
    vm_shared_region_t	shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> vm_shared_region_get_slide_info(%p)\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region),
         (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info)));
    return &shared_region->sr_slide_info;
}
/*
 * Set the shared region the process should use.
 * A NULL new shared region means that we just want to release the old
 * shared region.
 * The caller should already have an extra reference on the new shared region
 * (if any).  We release a reference on the old shared region (if any).
 */
void
vm_shared_region_set(
    task_t		task,
    vm_shared_region_t	new_shared_region)
{
    vm_shared_region_t	old_shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> set(%p, %p)\n",
         (void *)VM_KERNEL_ADDRPERM(task),
         (void *)VM_KERNEL_ADDRPERM(new_shared_region)));

    vm_shared_region_lock();

    old_shared_region = task->shared_region;
    if (new_shared_region) {
        assert(new_shared_region->sr_ref_count > 0);
    }

    task->shared_region = new_shared_region;

    vm_shared_region_unlock();

    if (old_shared_region) {
        assert(old_shared_region->sr_ref_count > 0);
        vm_shared_region_deallocate(old_shared_region);
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: set(%p) <- old=%p new=%p\n",
         (void *)VM_KERNEL_ADDRPERM(task),
         (void *)VM_KERNEL_ADDRPERM(old_shared_region),
         (void *)VM_KERNEL_ADDRPERM(new_shared_region)));
}
/*
 * Look up the shared region for the desired environment.
 * If none is found, create a new (empty) one.
 * Grab an extra reference on the returned shared region, to make sure
 * it doesn't get destroyed before the caller is done with it.  The caller
 * is responsible for consuming that extra reference if necessary.
 */
vm_shared_region_t
vm_shared_region_lookup(
    void	*root_dir,
    cpu_type_t	cputype,
    boolean_t	is_64bit)
{
    vm_shared_region_t	shared_region;
    vm_shared_region_t	new_shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> lookup(root=%p,cpu=%d,64bit=%d)\n",
         (void *)VM_KERNEL_ADDRPERM(root_dir), cputype, is_64bit));

    shared_region = NULL;
    new_shared_region = NULL;

    vm_shared_region_lock();

retry:
    queue_iterate(&vm_shared_region_queue,
                  shared_region,
                  vm_shared_region_t,
                  sr_q) {
        assert(shared_region->sr_ref_count > 0);
        if (shared_region->sr_cpu_type == cputype &&
            shared_region->sr_root_dir == root_dir &&
            shared_region->sr_64bit == is_64bit) {
            /* found a match ! */
            vm_shared_region_reference_locked(shared_region);
            goto done;
        }
    }
    if (new_shared_region == NULL) {
        /* no match: create a new one */
        vm_shared_region_unlock();
        new_shared_region = vm_shared_region_create(root_dir,
                                                    cputype,
                                                    is_64bit);
        /* do the lookup again, in case we lost a race */
        vm_shared_region_lock();
        goto retry;
    }
    /* still no match: use our new one */
    shared_region = new_shared_region;
    new_shared_region = NULL;
    queue_enter(&vm_shared_region_queue,
                shared_region,
                vm_shared_region_t,
                sr_q);

done:
    vm_shared_region_unlock();

    if (new_shared_region) {
        /*
         * We lost a race with someone else to create a new shared
         * region for that environment.  Get rid of our unused one.
         */
        assert(new_shared_region->sr_ref_count == 1);
        new_shared_region->sr_ref_count--;
        vm_shared_region_destroy(new_shared_region);
        new_shared_region = NULL;
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: lookup(root=%p,cpu=%d,64bit=%d) <- %p\n",
         (void *)VM_KERNEL_ADDRPERM(root_dir),
         cputype, is_64bit,
         (void *)VM_KERNEL_ADDRPERM(shared_region)));

    assert(shared_region->sr_ref_count > 0);
    return shared_region;
}
/*
 * Take an extra reference on a shared region.
 * The vm_shared_region_lock should already be held by the caller.
 */
static void
vm_shared_region_reference_locked(
    vm_shared_region_t	shared_region)
{
    LCK_MTX_ASSERT(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> reference_locked(%p)\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 0);
    shared_region->sr_ref_count++;

    if (shared_region->sr_timer_call != NULL) {
        boolean_t cancelled;

        /* cancel and free any pending timeout */
        cancelled = thread_call_cancel(shared_region->sr_timer_call);
        if (cancelled) {
            thread_call_free(shared_region->sr_timer_call);
            shared_region->sr_timer_call = NULL;
            /* release the reference held by the cancelled timer */
            shared_region->sr_ref_count--;
        } else {
            /* the timer will drop the reference and free itself */
        }
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: reference_locked(%p) <- %d\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region),
         shared_region->sr_ref_count));
}
/*
 * Release a reference on the shared region.
 * Destroy it if there are no references left.
 */
void
vm_shared_region_deallocate(
    vm_shared_region_t	shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> deallocate(%p)\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region)));

    vm_shared_region_lock();

    assert(shared_region->sr_ref_count > 0);

    if (shared_region->sr_root_dir == NULL) {
        /*
         * Local (i.e. based on the boot volume) shared regions
         * can persist or not based on the "shared_region_persistence"
         * sysctl.
         * Make sure that this one complies.
         *
         * See comments in vm_shared_region_slide() for notes about
         * shared regions we have slid (which are not torn down currently).
         */
        if (shared_region_persistence &&
            !shared_region->sr_persists) {
            /* make this one persistent */
            shared_region->sr_ref_count++;
            shared_region->sr_persists = TRUE;
        } else if (!shared_region_persistence &&
                   shared_region->sr_persists) {
            /* make this one no longer persistent */
            assert(shared_region->sr_ref_count > 1);
            shared_region->sr_ref_count--;
            shared_region->sr_persists = FALSE;
        }
    }

    assert(shared_region->sr_ref_count > 0);
    shared_region->sr_ref_count--;
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: deallocate(%p): ref now %d\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region),
         shared_region->sr_ref_count));

    if (shared_region->sr_ref_count == 0) {
        uint64_t deadline;

        assert(!shared_region->sr_slid);

        if (shared_region->sr_timer_call == NULL) {
            /* hold one reference for the timer */
            assert(!shared_region->sr_mapping_in_progress);
            shared_region->sr_ref_count++;

            /* set up the timer */
            shared_region->sr_timer_call = thread_call_allocate(
                (thread_call_func_t) vm_shared_region_timeout,
                (thread_call_param_t) shared_region);

            /* schedule the timer */
            clock_interval_to_deadline(shared_region_destroy_delay,
                                       1000 * 1000 * 1000,
                                       &deadline);
            thread_call_enter_delayed(shared_region->sr_timer_call,
                                      deadline);

            SHARED_REGION_TRACE_DEBUG(
                ("shared_region: deallocate(%p): armed timer\n",
                 (void *)VM_KERNEL_ADDRPERM(shared_region)));

            vm_shared_region_unlock();
        } else {
            /* timer expired: let go of this shared region */

            /*
             * We can't properly handle teardown of a slid object today.
             */
            assert(!shared_region->sr_slid);

            /*
             * Remove it from the queue first, so no one can find
             * it...
             */
            queue_remove(&vm_shared_region_queue,
                         shared_region,
                         vm_shared_region_t,
                         sr_q);
            vm_shared_region_unlock();

            /* ... and destroy it */
            vm_shared_region_destroy(shared_region);
            shared_region = NULL;
        }
    } else {
        vm_shared_region_unlock();
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: deallocate(%p) <-\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region)));
}
static void
vm_shared_region_timeout(
    thread_call_param_t		param0,
    __unused thread_call_param_t	param1)
{
    vm_shared_region_t	shared_region;

    shared_region = (vm_shared_region_t) param0;

    vm_shared_region_deallocate(shared_region);
}
/*
 * Create a new (empty) shared region for a new environment.
 */
static vm_shared_region_t
vm_shared_region_create(
    void	*root_dir,
    cpu_type_t	cputype,
    boolean_t	is_64bit)
{
    kern_return_t		kr;
    vm_named_entry_t		mem_entry;
    ipc_port_t			mem_entry_port;
    vm_shared_region_t		shared_region;
    vm_shared_region_slide_info_t si;
    vm_map_t			sub_map;
    mach_vm_offset_t		base_address, pmap_nesting_start;
    mach_vm_size_t		size, pmap_nesting_size;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> create(root=%p,cpu=%d,64bit=%d)\n",
         (void *)VM_KERNEL_ADDRPERM(root_dir), cputype, is_64bit));

    base_address = 0;
    size = 0;
    mem_entry = NULL;
    mem_entry_port = IPC_PORT_NULL;
    sub_map = VM_MAP_NULL;

    /* create a new shared region structure... */
    shared_region = kalloc(sizeof (*shared_region));
    if (shared_region == NULL) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: create: couldn't allocate\n"));
        goto done;
    }

    /* figure out the correct settings for the desired environment */
    if (is_64bit) {
        switch (cputype) {
        case CPU_TYPE_X86_64:
            base_address = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
            break;
        case CPU_TYPE_POWERPC:
            base_address = SHARED_REGION_BASE_PPC64;
            size = SHARED_REGION_SIZE_PPC64;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
            break;
        default:
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: create: unknown cpu type %d\n",
                 cputype));
            kfree(shared_region, sizeof (*shared_region));
            shared_region = NULL;
            goto done;
        }
    } else {
        switch (cputype) {
        case CPU_TYPE_I386:
            base_address = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
            break;
        case CPU_TYPE_POWERPC:
            base_address = SHARED_REGION_BASE_PPC;
            size = SHARED_REGION_SIZE_PPC;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
            break;
        default:
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: create: unknown cpu type %d\n",
                 cputype));
            kfree(shared_region, sizeof (*shared_region));
            shared_region = NULL;
            goto done;
        }
    }

    /* create a memory entry structure and a Mach port handle */
    kr = mach_memory_entry_allocate(&mem_entry,
                                    &mem_entry_port);
    if (kr != KERN_SUCCESS) {
        kfree(shared_region, sizeof (*shared_region));
        shared_region = NULL;
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: create: "
             "couldn't allocate mem_entry\n"));
        goto done;
    }

    /* create a VM sub map and its pmap */
    sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit),
                            0, size,
                            TRUE);
    if (sub_map == VM_MAP_NULL) {
        ipc_port_release_send(mem_entry_port);
        kfree(shared_region, sizeof (*shared_region));
        shared_region = NULL;
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: create: "
             "couldn't allocate map\n"));
        goto done;
    }

    assert(!sub_map->disable_vmentry_reuse);
    sub_map->is_nested_map = TRUE;

    /* make the memory entry point to the VM sub map */
    mem_entry->is_sub_map = TRUE;
    mem_entry->backing.map = sub_map;
    mem_entry->size = size;
    mem_entry->protection = VM_PROT_ALL;

    /* make the shared region point at the memory entry */
    shared_region->sr_mem_entry = mem_entry_port;

    /* fill in the shared region's environment and settings */
    shared_region->sr_base_address = base_address;
    shared_region->sr_size = size;
    shared_region->sr_pmap_nesting_start = pmap_nesting_start;
    shared_region->sr_pmap_nesting_size = pmap_nesting_size;
    shared_region->sr_cpu_type = cputype;
    shared_region->sr_64bit = is_64bit;
    shared_region->sr_root_dir = root_dir;

    queue_init(&shared_region->sr_q);
    shared_region->sr_mapping_in_progress = FALSE;
    shared_region->sr_slide_in_progress = FALSE;
    shared_region->sr_persists = FALSE;
    shared_region->sr_slid = FALSE;
    shared_region->sr_timer_call = NULL;
    shared_region->sr_first_mapping = (mach_vm_offset_t) -1;

    /* grab a reference for the caller */
    shared_region->sr_ref_count = 1;

    /* And set up slide info */
    si = &shared_region->sr_slide_info;
    si->start = 0;
    si->end = 0;
    si->slide = 0;
    si->slide_object = NULL;
    si->slide_info_size = 0;
    si->slide_info_entry = NULL;

done:
    if (shared_region) {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: create(root=%p,cpu=%d,64bit=%d,"
             "base=0x%llx,size=0x%llx) <- "
             "%p mem=(%p,%p) map=%p pmap=%p\n",
             (void *)VM_KERNEL_ADDRPERM(root_dir),
             cputype, is_64bit, (long long)base_address,
             (long long)size,
             (void *)VM_KERNEL_ADDRPERM(shared_region),
             (void *)VM_KERNEL_ADDRPERM(mem_entry_port),
             (void *)VM_KERNEL_ADDRPERM(mem_entry),
             (void *)VM_KERNEL_ADDRPERM(sub_map),
             (void *)VM_KERNEL_ADDRPERM(sub_map->pmap)));
    } else {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: create(root=%p,cpu=%d,64bit=%d,"
             "base=0x%llx,size=0x%llx) <- NULL",
             (void *)VM_KERNEL_ADDRPERM(root_dir),
             cputype, is_64bit, (long long)base_address,
             (long long)size));
    }
    return shared_region;
}
/*
 * Destroy a now-unused shared region.
 * The shared region is no longer in the queue and can not be looked up.
 */
static void
vm_shared_region_destroy(
    vm_shared_region_t	shared_region)
{
    vm_named_entry_t	mem_entry;
    vm_map_t		map;

    SHARED_REGION_TRACE_INFO(
        ("shared_region: -> destroy(%p) (root=%p,cpu=%d,64bit=%d)\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region),
         (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir),
         shared_region->sr_cpu_type,
         shared_region->sr_64bit));

    assert(shared_region->sr_ref_count == 0);
    assert(!shared_region->sr_persists);
    assert(!shared_region->sr_slid);

    mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
    assert(mem_entry->is_sub_map);
    assert(!mem_entry->internal);
    assert(!mem_entry->is_pager);
    assert(!mem_entry->is_copy);
    map = mem_entry->backing.map;

    /*
     * Clean up the pmap first.  The virtual addresses that were
     * entered in this possibly "nested" pmap may have different values
     * than the VM map's min and max offsets, if the VM sub map was
     * mapped at a non-zero offset in the processes' main VM maps, which
     * is usually the case, so the clean-up we do in vm_map_destroy() would
     * not be enough here.
     */
    if (map->pmap) {
        pmap_remove(map->pmap,
                    shared_region->sr_base_address,
                    (shared_region->sr_base_address +
                     shared_region->sr_size));
    }

    /*
     * Release our (one and only) handle on the memory entry.
     * This will generate a no-senders notification, which will be processed
     * by ipc_kobject_notify(), which will release the one and only
     * reference on the memory entry and cause it to be destroyed, along
     * with the VM sub map and its pmap.
     */
    mach_memory_entry_port_release(shared_region->sr_mem_entry);
    mem_entry = NULL;
    shared_region->sr_mem_entry = IPC_PORT_NULL;

    if (shared_region->sr_timer_call) {
        thread_call_free(shared_region->sr_timer_call);
    }

#if 0
    /*
     * If slid, free those resources.  We'll want this eventually,
     * but can't handle it properly today.
     */
    si = &shared_region->sr_slide_info;
    if (si->slide_info_entry) {
        kmem_free(kernel_map,
                  (vm_offset_t) si->slide_info_entry,
                  (vm_size_t) si->slide_info_size);
        vm_object_deallocate(si->slide_object);
    }
#endif

    /* release the shared region structure... */
    kfree(shared_region, sizeof (*shared_region));

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: destroy(%p) <-\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region)));
    shared_region = NULL;
}
/*
 * Gets the address of the first (in time) mapping in the shared region.
 */
kern_return_t
vm_shared_region_start_address(
    vm_shared_region_t	shared_region,
    mach_vm_offset_t	*start_address)
{
    kern_return_t	kr;
    mach_vm_offset_t	sr_base_address;
    mach_vm_offset_t	sr_first_mapping;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> start_address(%p)\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);

    vm_shared_region_lock();

    /*
     * Wait if there's another thread establishing a mapping
     * in this shared region right when we're looking at it.
     * We want a consistent view of the map...
     */
    while (shared_region->sr_mapping_in_progress) {
        /* wait for our turn... */
        assert(shared_region->sr_ref_count > 1);
        vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
                               THREAD_UNINT);
    }
    assert(!shared_region->sr_mapping_in_progress);
    assert(shared_region->sr_ref_count > 1);

    sr_base_address = shared_region->sr_base_address;
    sr_first_mapping = shared_region->sr_first_mapping;

    if (sr_first_mapping == (mach_vm_offset_t) -1) {
        /* shared region is empty */
        kr = KERN_INVALID_ADDRESS;
    } else {
        kr = KERN_SUCCESS;
        *start_address = sr_base_address + sr_first_mapping;
    }

    vm_shared_region_unlock();

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: start_address(%p) <- 0x%llx\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region),
         (long long)shared_region->sr_base_address));

    return kr;
}
void
vm_shared_region_undo_mappings(
    vm_map_t			sr_map,
    mach_vm_offset_t		sr_base_address,
    struct shared_file_mapping_np *mappings,
    unsigned int		mappings_count)
{
    unsigned int	j = 0;
    vm_shared_region_t	shared_region = NULL;
    boolean_t		reset_shared_region_state = FALSE;

    shared_region = vm_shared_region_get(current_task());
    if (shared_region == NULL) {
        printf("Failed to undo mappings because of NULL shared region.\n");
        return;
    }

    if (sr_map == NULL) {
        ipc_port_t		sr_handle;
        vm_named_entry_t	sr_mem_entry;

        vm_shared_region_lock();
        assert(shared_region->sr_ref_count > 1);

        while (shared_region->sr_mapping_in_progress) {
            /* wait for our turn... */
            vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
                                   THREAD_UNINT);
        }
        assert(!shared_region->sr_mapping_in_progress);
        assert(shared_region->sr_ref_count > 1);
        /* let others know we're working in this shared region */
        shared_region->sr_mapping_in_progress = TRUE;

        vm_shared_region_unlock();

        reset_shared_region_state = TRUE;

        /* no need to lock because this data is never modified... */
        sr_handle = shared_region->sr_mem_entry;
        sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
        sr_map = sr_mem_entry->backing.map;
        sr_base_address = shared_region->sr_base_address;
    }
    /*
     * Undo the mappings we've established so far.
     */
    for (j = 0; j < mappings_count; j++) {
        kern_return_t kr2;

        if (mappings[j].sfm_size == 0) {
            /*
             * We didn't establish this
             * mapping, so nothing to undo.
             */
            continue;
        }
        SHARED_REGION_TRACE_INFO(
            ("shared_region: mapping[%d]: "
             "address:0x%016llx size:0x%016llx offset:0x%016llx "
             "maxprot:0x%x prot:0x%x: "
             "undoing...\n",
             j,
             (long long)mappings[j].sfm_address,
             (long long)mappings[j].sfm_size,
             (long long)mappings[j].sfm_file_offset,
             mappings[j].sfm_max_prot,
             mappings[j].sfm_init_prot));
        kr2 = mach_vm_deallocate(
            sr_map,
            (mappings[j].sfm_address -
             sr_base_address),
            mappings[j].sfm_size);
        assert(kr2 == KERN_SUCCESS);
    }

    if (reset_shared_region_state) {
        vm_shared_region_lock();
        assert(shared_region->sr_ref_count > 1);
        assert(shared_region->sr_mapping_in_progress);
        /* we're done working on that shared region */
        shared_region->sr_mapping_in_progress = FALSE;
        thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
        vm_shared_region_unlock();
        reset_shared_region_state = FALSE;
    }

    vm_shared_region_deallocate(shared_region);
}
/*
 * Establish some mappings of a file in the shared region.
 * This is used by "dyld" via the shared_region_map_np() system call
 * to populate the shared region with the appropriate shared cache.
 *
 * One could also call it several times to incrementally load several
 * libraries, as long as they do not overlap.
 * It will return KERN_SUCCESS if the mappings were successfully established
 * or if they were already established identically by another process.
 */
kern_return_t
vm_shared_region_map_file(
    vm_shared_region_t		shared_region,
    unsigned int		mappings_count,
    struct shared_file_mapping_np *mappings,
    memory_object_control_t	file_control,
    memory_object_size_t	file_size,
    void			*root_dir,
    uint32_t			slide,
    user_addr_t			slide_start,
    user_addr_t			slide_size)
{
    kern_return_t		kr;
    vm_object_t			file_object;
    ipc_port_t			sr_handle;
    vm_named_entry_t		sr_mem_entry;
    vm_map_t			sr_map;
    mach_vm_offset_t		sr_base_address;
    unsigned int		i;
    mach_port_t			map_port;
    vm_map_offset_t		target_address;
    vm_object_t			object;
    vm_object_size_t		obj_size;
    struct shared_file_mapping_np *mapping_to_slide = NULL;
    mach_vm_offset_t		first_mapping = (mach_vm_offset_t) -1;
    vm_map_offset_t		lowest_unnestable_addr = 0;

    kr = KERN_SUCCESS;

    vm_shared_region_lock();
    assert(shared_region->sr_ref_count > 1);

    if (shared_region->sr_root_dir != root_dir) {
        /*
         * This shared region doesn't match the current root
         * directory of this process.  Deny the mapping to
         * avoid tainting the shared region with something that
         * doesn't quite belong into it.
         */
        vm_shared_region_unlock();
        kr = KERN_PROTECTION_FAILURE;
        goto done;
    }

    /*
     * Make sure we handle only one mapping at a time in a given
     * shared region, to avoid race conditions.  This should not
     * happen frequently...
     */
    while (shared_region->sr_mapping_in_progress) {
        /* wait for our turn... */
        vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
                               THREAD_UNINT);
    }
    assert(!shared_region->sr_mapping_in_progress);
    assert(shared_region->sr_ref_count > 1);
    /* let others know we're working in this shared region */
    shared_region->sr_mapping_in_progress = TRUE;

    vm_shared_region_unlock();

    /* no need to lock because this data is never modified... */
    sr_handle = shared_region->sr_mem_entry;
    sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
    sr_map = sr_mem_entry->backing.map;
    sr_base_address = shared_region->sr_base_address;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
         (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
         (void *)VM_KERNEL_ADDRPERM(mappings),
         (void *)VM_KERNEL_ADDRPERM(file_control), file_size));

    /* get the VM object associated with the file to be mapped */
    file_object = memory_object_control_to_vm_object(file_control);

    /* establish the mappings */
    for (i = 0; i < mappings_count; i++) {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: mapping[%d]: "
             "address:0x%016llx size:0x%016llx offset:0x%016llx "
             "maxprot:0x%x prot:0x%x\n",
             i,
             (long long)mappings[i].sfm_address,
             (long long)mappings[i].sfm_size,
             (long long)mappings[i].sfm_file_offset,
             mappings[i].sfm_max_prot,
             mappings[i].sfm_init_prot));

        if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
            /* zero-filled memory */
            map_port = MACH_PORT_NULL;
        } else {
            /* file-backed memory */
            __IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager);
        }

        if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) {
            /*
             * This is the mapping that needs to be slid.
             */
            if (mapping_to_slide != NULL) {
                SHARED_REGION_TRACE_INFO(
                    ("shared_region: mapping[%d]: "
                     "address:0x%016llx size:0x%016llx "
                     "offset:0x%016llx "
                     "maxprot:0x%x prot:0x%x "
                     "will not be slid as only one such mapping is allowed...\n",
                     i,
                     (long long)mappings[i].sfm_address,
                     (long long)mappings[i].sfm_size,
                     (long long)mappings[i].sfm_file_offset,
                     mappings[i].sfm_max_prot,
                     mappings[i].sfm_init_prot));
            } else {
                mapping_to_slide = &mappings[i];
            }
        }

        /* mapping's address is relative to the shared region base */
        target_address =
            mappings[i].sfm_address - sr_base_address;

        /* establish that mapping, OK if it's "already" there */
        if (map_port == MACH_PORT_NULL) {
            /*
             * We want to map some anonymous memory in a
             * shared region.
             * We have to create the VM object now, so that it
             * can be mapped "copy-on-write".
             */
            obj_size = vm_map_round_page(mappings[i].sfm_size,
                                         VM_MAP_PAGE_MASK(sr_map));
            object = vm_object_allocate(obj_size);
            if (object == VM_OBJECT_NULL) {
                kr = KERN_RESOURCE_SHORTAGE;
            } else {
                kr = vm_map_enter(
                    sr_map,
                    &target_address,
                    vm_map_round_page(mappings[i].sfm_size,
                                      VM_MAP_PAGE_MASK(sr_map)),
                    0,
                    VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
                    object,
                    0,
                    TRUE,
                    mappings[i].sfm_init_prot & VM_PROT_ALL,
                    mappings[i].sfm_max_prot & VM_PROT_ALL,
                    VM_INHERIT_DEFAULT);
            }
        } else {
            object = VM_OBJECT_NULL; /* no anonymous memory here */
            kr = vm_map_enter_mem_object(
                sr_map,
                &target_address,
                vm_map_round_page(mappings[i].sfm_size,
                                  VM_MAP_PAGE_MASK(sr_map)),
                0,
                VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
                map_port,
                mappings[i].sfm_file_offset,
                TRUE,
                mappings[i].sfm_init_prot & VM_PROT_ALL,
                mappings[i].sfm_max_prot & VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
        }

        if (kr == KERN_SUCCESS) {
            /*
             * Record the first (chronologically) successful
             * mapping in this shared region.
             * We're protected by "sr_mapping_in_progress" here,
             * so no need to lock "shared_region".
             */
            if (first_mapping == (mach_vm_offset_t) -1) {
                first_mapping = target_address;
            }

            /*
             * Record the lowest writable address in this
             * sub map, to log any unexpected unnesting below
             * that address (see log_unnest_badness()).
             */
            if ((mappings[i].sfm_init_prot & VM_PROT_WRITE) &&
                sr_map->is_nested_map &&
                (lowest_unnestable_addr == 0 ||
                 (target_address < lowest_unnestable_addr))) {
                lowest_unnestable_addr = target_address;
            }
        } else {
            if (map_port == MACH_PORT_NULL) {
                /*
                 * Get rid of the VM object we just created
                 * but failed to map.
                 */
                vm_object_deallocate(object);
                object = VM_OBJECT_NULL;
            }
            if (kr == KERN_MEMORY_PRESENT) {
                /*
                 * This exact mapping was already there:
                 * that's fine.
                 */
                SHARED_REGION_TRACE_INFO(
                    ("shared_region: mapping[%d]: "
                     "address:0x%016llx size:0x%016llx "
                     "offset:0x%016llx "
                     "maxprot:0x%x prot:0x%x "
                     "already mapped...\n",
                     i,
                     (long long)mappings[i].sfm_address,
                     (long long)mappings[i].sfm_size,
                     (long long)mappings[i].sfm_file_offset,
                     mappings[i].sfm_max_prot,
                     mappings[i].sfm_init_prot));
                /*
                 * We didn't establish this mapping ourselves;
                 * let's reset its size, so that we do not
                 * attempt to undo it if an error occurs later.
                 */
                mappings[i].sfm_size = 0;
                kr = KERN_SUCCESS;
            } else {
                /* this mapping failed ! */
                SHARED_REGION_TRACE_ERROR(
                    ("shared_region: mapping[%d]: "
                     "address:0x%016llx size:0x%016llx "
                     "offset:0x%016llx "
                     "maxprot:0x%x prot:0x%x failed 0x%x\n",
                     i,
                     (long long)mappings[i].sfm_address,
                     (long long)mappings[i].sfm_size,
                     (long long)mappings[i].sfm_file_offset,
                     mappings[i].sfm_max_prot,
                     mappings[i].sfm_init_prot,
                     kr));

                vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i);
                break;
            }
        }
    }

    if (kr == KERN_SUCCESS &&
        slide &&
        mapping_to_slide != NULL) {
        kr = vm_shared_region_slide(slide,
                                    mapping_to_slide->sfm_file_offset,
                                    mapping_to_slide->sfm_size,
                                    slide_start,
                                    slide_size,
                                    file_control);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: region_slide("
                 "slide:0x%x start:0x%016llx "
                 "size:0x%016llx) failed 0x%x\n",
                 slide,
                 (long long)slide_start,
                 (long long)slide_size,
                 kr));
            vm_shared_region_undo_mappings(sr_map,
                                           sr_base_address,
                                           mappings,
                                           mappings_count);
        }
    }

    if (kr == KERN_SUCCESS) {
        /* adjust the map's "lowest_unnestable_start" */
        lowest_unnestable_addr &= ~(pmap_nesting_size_min-1);
        if (lowest_unnestable_addr !=
            sr_map->lowest_unnestable_start) {
            vm_map_lock(sr_map);
            sr_map->lowest_unnestable_start =
                lowest_unnestable_addr;
            vm_map_unlock(sr_map);
        }
    }

    vm_shared_region_lock();
    assert(shared_region->sr_ref_count > 1);
    assert(shared_region->sr_mapping_in_progress);
    /* set "sr_first_mapping"; dyld uses it to validate the shared cache */
    if (kr == KERN_SUCCESS &&
        shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
        shared_region->sr_first_mapping = first_mapping;
    }
    /* we're done working on that shared region */
    shared_region->sr_mapping_in_progress = FALSE;
    thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
    vm_shared_region_unlock();

done:
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n",
         (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
         (void *)VM_KERNEL_ADDRPERM(mappings),
         (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr));
    return kr;
}
/*
 * Enter the appropriate shared region into "map" for "task".
 * This involves looking up the shared region (and possibly creating a new
 * one) for the desired environment, then mapping the VM sub map into the
 * task's VM "map", with the appropriate level of pmap-nesting.
 */
kern_return_t
vm_shared_region_enter(
    struct _vm_map	*map,
    struct task		*task,
    void		*fsroot,
    cpu_type_t		cpu,
    boolean_t		is_64bit)
{
    kern_return_t		kr;
    vm_shared_region_t		shared_region;
    vm_map_offset_t		sr_address, sr_offset, target_address;
    vm_map_size_t		sr_size, mapping_size;
    vm_map_offset_t		sr_pmap_nesting_start;
    vm_map_size_t		sr_pmap_nesting_size;
    ipc_port_t			sr_handle;
    vm_prot_t			cur_prot, max_prot;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> "
         "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d)\n",
         (void *)VM_KERNEL_ADDRPERM(map),
         (void *)VM_KERNEL_ADDRPERM(task),
         (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit));

    /* lookup (create if needed) the shared region for this environment */
    shared_region = vm_shared_region_lookup(fsroot, cpu, is_64bit);
    if (shared_region == NULL) {
        /* this should not happen ! */
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: -> "
             "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d): "
             "lookup failed !\n",
             (void *)VM_KERNEL_ADDRPERM(map),
             (void *)VM_KERNEL_ADDRPERM(task),
             (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit));
        //panic("shared_region_enter: lookup failed\n");
        return KERN_FAILURE;
    }

    /* let the task use that shared region */
    vm_shared_region_set(task, shared_region);

    /* no need to lock since this data is never modified */
    sr_address = shared_region->sr_base_address;
    sr_size = shared_region->sr_size;
    sr_handle = shared_region->sr_mem_entry;
    sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start;
    sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size;

    cur_prot = VM_PROT_READ;
#if __x86_64__
    /*
     * XXX BINARY COMPATIBILITY
     * java6 apparently needs to modify some code in the
     * dyld shared cache and needs to be allowed to add
     * write access...
     */
    max_prot = VM_PROT_ALL;
#else /* __x86_64__ */
    max_prot = VM_PROT_READ;
#endif /* __x86_64__ */
    /*
     * Start mapping the shared region's VM sub map into the task's VM map.
     */
    sr_offset = 0;

    if (sr_pmap_nesting_start > sr_address) {
        /* we need to map a range without pmap-nesting first */
        target_address = sr_address;
        mapping_size = sr_pmap_nesting_start - sr_address;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d): "
                 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                 (void *)VM_KERNEL_ADDRPERM(map),
                 (void *)VM_KERNEL_ADDRPERM(task),
                 (void *)VM_KERNEL_ADDRPERM(fsroot),
                 cpu, is_64bit,
                 (long long)target_address,
                 (long long)mapping_size,
                 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d): "
             "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
             (void *)VM_KERNEL_ADDRPERM(map),
             (void *)VM_KERNEL_ADDRPERM(task),
             (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
             (long long)target_address, (long long)mapping_size,
             (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
        sr_offset += mapping_size;
        sr_size -= mapping_size;
    }
    /*
     * We may need to map several pmap-nested portions, due to platform
     * specific restrictions on pmap nesting.
     * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias...
     */
    for (;
         sr_pmap_nesting_size > 0;
         sr_offset += mapping_size,
             sr_size -= mapping_size,
             sr_pmap_nesting_size -= mapping_size) {
        target_address = sr_address + sr_offset;
        mapping_size = sr_pmap_nesting_size;
        if (mapping_size > pmap_nesting_size_max) {
            mapping_size = (vm_map_offset_t) pmap_nesting_size_max;
        }
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            (VM_FLAGS_FIXED | VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP)),
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d): "
                 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                 (void *)VM_KERNEL_ADDRPERM(map),
                 (void *)VM_KERNEL_ADDRPERM(task),
                 (void *)VM_KERNEL_ADDRPERM(fsroot),
                 cpu, is_64bit,
                 (long long)target_address,
                 (long long)mapping_size,
                 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d): "
             "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
             (void *)VM_KERNEL_ADDRPERM(map),
             (void *)VM_KERNEL_ADDRPERM(task),
             (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
             (long long)target_address, (long long)mapping_size,
             (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
    }
    if (sr_size > 0) {
        /* and there's some left to be mapped without pmap-nesting */
        target_address = sr_address + sr_offset;
        mapping_size = sr_size;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d): "
                 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                 (void *)VM_KERNEL_ADDRPERM(map),
                 (void *)VM_KERNEL_ADDRPERM(task),
                 (void *)VM_KERNEL_ADDRPERM(fsroot),
                 cpu, is_64bit,
                 (long long)target_address,
                 (long long)mapping_size,
                 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d): "
             "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
             (void *)VM_KERNEL_ADDRPERM(map),
             (void *)VM_KERNEL_ADDRPERM(task),
             (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
             (long long)target_address, (long long)mapping_size,
             (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
        sr_offset += mapping_size;
        sr_size -= mapping_size;
    }
    assert(sr_size == 0);

done:
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: enter(%p,%p,%p,%d,%d) <- 0x%x\n",
         (void *)VM_KERNEL_ADDRPERM(map),
         (void *)VM_KERNEL_ADDRPERM(task),
         (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit, kr));
    return kr;
}
#define SANE_SLIDE_INFO_SIZE		(2560*1024) /*Can be changed if needed*/
struct vm_shared_region_slide_info	slide_info;

kern_return_t
vm_shared_region_sliding_valid(uint32_t slide)
{
    kern_return_t kr = KERN_SUCCESS;
    vm_shared_region_t sr = vm_shared_region_get(current_task());

    /* No region yet? we're fine. */
    if (sr == NULL) {
        return kr;
    }

    if ((sr->sr_slid == TRUE) && slide) {
        if (slide != vm_shared_region_get_slide_info(sr)->slide) {
            printf("Only one shared region can be slid\n");
            kr = KERN_FAILURE;
        } else {
            /*
             * Request for sliding when we've
             * already done it with exactly the
             * same slide value before.
             * This isn't wrong technically but
             * we don't want to slide again and
             * so we return this value.
             */
            kr = KERN_INVALID_ARGUMENT;
        }
    }
    vm_shared_region_deallocate(sr);
    return kr;
}
kern_return_t
vm_shared_region_slide_init(
    vm_shared_region_t	sr,
    mach_vm_size_t	slide_info_size,
    mach_vm_offset_t	start,
    mach_vm_size_t	size,
    uint32_t		slide,
    memory_object_control_t	sr_file_control)
{
    kern_return_t	kr = KERN_SUCCESS;
    vm_object_t		object = VM_OBJECT_NULL;
    vm_object_offset_t	offset = 0;
    vm_shared_region_slide_info_t si = vm_shared_region_get_slide_info(sr);
    vm_offset_t		slide_info_entry;

    vm_map_t	map = NULL, cur_map = NULL;
    boolean_t	is_map_locked = FALSE;

    assert(sr->sr_slide_in_progress);
    assert(!sr->sr_slid);
    assert(si->slide_object == NULL);
    assert(si->slide_info_entry == NULL);

    if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
        printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
        kr = KERN_FAILURE;
        return kr;
    }

    kr = kmem_alloc(kernel_map,
                    (vm_offset_t *) &slide_info_entry,
                    (vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    if (sr_file_control != MEMORY_OBJECT_CONTROL_NULL) {

        object = memory_object_control_to_vm_object(sr_file_control);
        vm_object_reference(object);
        offset = start;

        vm_object_lock(object);
    } else {
        /*
         * Remove this entire "else" block and all "map" references
         * once we get rid of the shared_region_slide_np()
         * system call.
         */
        vm_map_entry_t entry = VM_MAP_ENTRY_NULL;
        map = current_map();
        vm_map_lock_read(map);
        is_map_locked = TRUE;
    Retry:
        cur_map = map;
        if(!vm_map_lookup_entry(map, start, &entry)) {
            kr = KERN_INVALID_ARGUMENT;
        } else {
            vm_object_t shadow_obj = VM_OBJECT_NULL;

            if (entry->is_sub_map == TRUE) {
                map = VME_SUBMAP(entry);
                start -= entry->vme_start;
                start += VME_OFFSET(entry);
                vm_map_lock_read(map);
                vm_map_unlock_read(cur_map);
                goto Retry;
            } else {
                object = VME_OBJECT(entry);
                offset = ((start - entry->vme_start) +
                          VME_OFFSET(entry));
            }

            vm_object_lock(object);
            while (object->shadow != VM_OBJECT_NULL) {
                shadow_obj = object->shadow;
                vm_object_lock(shadow_obj);
                vm_object_unlock(object);
                object = shadow_obj;
            }
        }
    }

    if (object->internal == TRUE) {
        kr = KERN_INVALID_ADDRESS;
    } else if (object->object_slid) {
        /* Can only be slid once */
        printf("%s: found vm_object %p already slid?\n", __FUNCTION__, object);
        kr = KERN_FAILURE;
    } else {

        si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry;
        si->slide_info_size = slide_info_size;
        si->slide_object = object;
        si->start = offset;
        si->end = si->start + size;
        si->slide = slide;

        /*
         * If we want to have this region get deallocated/freed
         * then we will have to make sure that we msync(..MS_INVALIDATE..)
         * the pages associated with this shared region. Those pages would
         * have been slid with an older slide value.
         */

        /*
         * Pointers in object are held without references; they
         * are disconnected at the time that we destroy the
         * shared region, and since the shared region holds
         * a reference on the object, no references in the other
         * direction are required.
         */
        object->object_slid = TRUE;
        object->vo_slide_info = si;
    }

    vm_object_unlock(object);
    if (is_map_locked == TRUE) {
        vm_map_unlock_read(map);
    }

    if (kr != KERN_SUCCESS) {
        kmem_free(kernel_map, slide_info_entry, slide_info_size);
    }
    return kr;
}

void*
vm_shared_region_get_slide_info_entry(vm_shared_region_t sr) {
    return (void*)sr->sr_slide_info.slide_info_entry;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v1(vm_shared_region_slide_info_entry_v1_t s_info)
{
    uint32_t pageIndex=0;
    uint16_t entryIndex=0;
    uint16_t *toc = NULL;

    toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
    for (;pageIndex < s_info->toc_count; pageIndex++) {

        entryIndex = (uint16_t)(toc[pageIndex]);

        if (entryIndex >= s_info->entry_count) {
            printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
            return KERN_FAILURE;
        }
    }
    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v2(vm_shared_region_slide_info_entry_v2_t s_info, mach_vm_size_t slide_info_size)
{
    if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
        return KERN_FAILURE;
    }

    /* Ensure that the slide info doesn't reference any data outside of its bounds. */

    uint32_t page_starts_count = s_info->page_starts_count;
    uint32_t page_extras_count = s_info->page_extras_count;
    mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
    if (num_trailing_entries < page_starts_count) {
        return KERN_FAILURE;
    }

    /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
    mach_vm_size_t trailing_size = num_trailing_entries << 1;
    if (trailing_size >> 1 != num_trailing_entries) {
        return KERN_FAILURE;
    }

    mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
    if (required_size < sizeof(*s_info)) {
        return KERN_FAILURE;
    }

    if (required_size > slide_info_size) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
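/*
 * Worked example (added for clarity, not from the original sources) of the
 * overflow check above: num_trailing_entries is a 64-bit mach_vm_size_t, so
 * "num_trailing_entries << 1" can only wrap if its top bit is set.  In that
 * case shifting back down no longer reproduces the original value and the
 * sanity check rejects the slide info:
 *
 *	mach_vm_size_t n = 0x8000000000000000ULL;
 *	mach_vm_size_t t = n << 1;	// wraps to 0
 *	assert((t >> 1) != n);		// caught: (0 >> 1) == 0 != n
 */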
kern_return_t
vm_shared_region_slide_sanity_check(vm_shared_region_t sr)
{
    vm_shared_region_slide_info_t si;
    vm_shared_region_slide_info_entry_t s_info;
    kern_return_t kr;

    si = vm_shared_region_get_slide_info(sr);
    s_info = si->slide_info_entry;

    kr = mach_vm_protect(kernel_map,
                         (mach_vm_offset_t)(vm_offset_t)s_info,
                         (mach_vm_size_t) si->slide_info_size,
                         TRUE, VM_PROT_READ);
    if (kr != KERN_SUCCESS) {
        panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr);
    }

    if (s_info->version == 1) {
        kr = vm_shared_region_slide_sanity_check_v1(&s_info->v1);
    } else if (s_info->version == 2) {
        kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, si->slide_info_size);
    } else {
        goto fail;
    }

    if (kr != KERN_SUCCESS) {
        goto fail;
    }

    return KERN_SUCCESS;
fail:
    if (si->slide_info_entry != NULL) {
        kmem_free(kernel_map,
                  (vm_offset_t) si->slide_info_entry,
                  (vm_size_t) si->slide_info_size);

        vm_object_lock(si->slide_object);
        si->slide_object->object_slid = FALSE;
        si->slide_object->vo_slide_info = NULL;
        vm_object_unlock(si->slide_object);

        vm_object_deallocate(si->slide_object);
        si->slide_object = NULL;
        si->slide_info_entry = NULL;
        si->slide_info_size = 0;
    }
    return KERN_FAILURE;
}
static kern_return_t
vm_shared_region_slide_page_v1(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
    uint16_t *toc = NULL;
    slide_info_entry_toc_t bitmap = NULL;
    uint32_t i=0, j=0;
    uint8_t b = 0;
    uint32_t slide = si->slide;
    int is_64 = task_has_64BitAddr(current_task());

    vm_shared_region_slide_info_entry_v1_t s_info = &si->slide_info_entry->v1;
    toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);

    if (pageIndex >= s_info->toc_count) {
        printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count);
    } else {
        uint16_t entryIndex = (uint16_t)(toc[pageIndex]);
        slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset);

        if (entryIndex >= s_info->entry_count) {
            printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count);
        } else {
            bitmap = &slide_info_entries[entryIndex];

            for(i=0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) {
                b = bitmap->entry[i];
                if (b != 0) {
                    for (j=0; j<8; ++j) {
                        if (b & (1 << j)) {
                            uint32_t *ptr_to_slide;
                            uint32_t old_value;

                            ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr)+(sizeof(uint32_t)*(i*8 +j)));
                            old_value = *ptr_to_slide;
                            *ptr_to_slide += slide;
                            if (is_64 && *ptr_to_slide < old_value) {
                                /*
                                 * We just slid the low 32 bits of a 64-bit pointer
                                 * and it looks like there should have been a carry-over
                                 * to the upper 32 bits.
                                 * The sliding failed...
                                 */
                                printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n",
                                       i, j, b, slide, old_value, *ptr_to_slide);
                                return KERN_FAILURE;
                            }
                        }
                    }
                }
            }
        }
    }

    return KERN_SUCCESS;
}
static kern_return_t
rebase_chain_32(
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

    const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
    const uint32_t value_mask = ~delta_mask;
    const uint32_t value_add = (uint32_t)(s_info->value_add);
    const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

    uint32_t page_offset = start_offset;
    uint32_t delta = 1;

    while (delta != 0 && page_offset <= last_page_offset) {
        uint8_t *loc;
        uint32_t value;

        loc = page_content + page_offset;
        memcpy(&value, loc, sizeof(value));
        delta = (value & delta_mask) >> delta_shift;
        value &= value_mask;

        if (value != 0) {
            value += value_add;
            value += slide_amount;
        }
        memcpy(loc, &value, sizeof(value));
        page_offset += delta;
    }

    /* If the offset went past the end of the page, then the slide data is invalid. */
    if (page_offset > last_page_offset) {
        return KERN_FAILURE;
    }
    return KERN_SUCCESS;
}
static kern_return_t
rebase_chain_64(
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint64_t);

    const uint64_t delta_mask = s_info->delta_mask;
    const uint64_t value_mask = ~delta_mask;
    const uint64_t value_add = s_info->value_add;
    const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;

    uint32_t page_offset = start_offset;
    uint32_t delta = 1;

    while (delta != 0 && page_offset <= last_page_offset) {
        uint8_t *loc;
        uint64_t value;

        loc = page_content + page_offset;
        memcpy(&value, loc, sizeof(value));
        delta = (uint32_t)((value & delta_mask) >> delta_shift);
        value &= value_mask;

        if (value != 0) {
            value += value_add;
            value += slide_amount;
        }
        memcpy(loc, &value, sizeof(value));
        page_offset += delta;
    }

    if (page_offset + sizeof(uint32_t) == PAGE_SIZE_FOR_SR_SLIDE) {
        /* If a pointer straddling the page boundary needs to be adjusted, then
         * add the slide to the lower half. The encoding guarantees that the upper
         * half on the next page will need no masking.
         *
         * This assumes a little-endian machine and that the region being slid
         * never crosses a 4 GB boundary. */

        uint8_t *loc = page_content + page_offset;
        uint32_t value;

        memcpy(&value, loc, sizeof(value));
        value += slide_amount;
        memcpy(loc, &value, sizeof(value));
    } else if (page_offset > last_page_offset) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
static kern_return_t
rebase_chain(
    boolean_t is_64,
    uint32_t pageIndex,
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    kern_return_t kr;

    if (is_64) {
        kr = rebase_chain_64(page_content, start_offset, slide_amount, s_info);
    } else {
        kr = rebase_chain_32(page_content, start_offset, slide_amount, s_info);
    }

    if (kr != KERN_SUCCESS) {
        printf("vm_shared_region_slide_page() offset overflow: pageIndex=%u, start_offset=%u, slide_amount=%u\n",
               pageIndex, start_offset, slide_amount);
    }
    return kr;
}
static kern_return_t
vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
	vm_shared_region_slide_info_entry_v2_t s_info = &si->slide_info_entry->v2;
	const uint32_t slide_amount = si->slide;

	/* The high bits of the delta_mask field are nonzero precisely when the shared
	 * cache is 64-bit. */
	const boolean_t is_64 = (s_info->delta_mask >> 32) != 0;

	const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
	const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

	uint8_t *page_content = (uint8_t *)vaddr;
	uint16_t page_entry;

	if (pageIndex >= s_info->page_starts_count) {
		printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
		    pageIndex, s_info->page_starts_count);
		return KERN_FAILURE;
	}
	page_entry = page_starts[pageIndex];

	if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
		return KERN_SUCCESS;
	}

	if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) {
		uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE;
		uint16_t info;

		do {
			uint16_t page_start_offset;
			kern_return_t kr;

			if (chain_index >= s_info->page_extras_count) {
				printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
				    chain_index, s_info->page_extras_count);
				return KERN_FAILURE;
			}
			info = page_extras[chain_index];
			page_start_offset = (info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;

			kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
			if (kr != KERN_SUCCESS) {
				return KERN_FAILURE;
			}

			chain_index++;
		} while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END));
	} else {
		const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
		kern_return_t kr;

		kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
		if (kr != KERN_SUCCESS) {
			return KERN_FAILURE;
		}
	}

	return KERN_SUCCESS;
}
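
/*
 * Illustrative sketch (not compiled): how a v2 page_starts entry is
 * interpreted by the function above.  A plain entry is the offset of the
 * first fixup on the page, scaled by DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT
 * (20 bytes here if the usual shift of 2 applies).  An entry with the EXTRA
 * bit set is instead an index into page_extras[], and the page is processed
 * as a run of chains, one per extras entry, until an entry with the END bit
 * set.  The entry values below are made up for illustration only.
 */
#if 0
static void
page_starts_example(void)
{
	/* plain entry: first fixup at (0x0005 << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT) bytes */
	uint16_t plain = 0x0005;
	uint32_t first_fixup = plain << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;

	/* EXTRA | 3: walk page_extras[3], page_extras[4], ... until one has ATTR_END set */
	uint16_t chained = DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA | 3;
	uint16_t chain_index = chained & DYLD_CACHE_SLIDE_PAGE_VALUE;

	(void)first_fixup;
	(void)chain_index;
}
#endif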
kern_return_t
vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
	if (si->slide_info_entry->version == 1) {
		return vm_shared_region_slide_page_v1(si, vaddr, pageIndex);
	} else {
		return vm_shared_region_slide_page_v2(si, vaddr, pageIndex);
	}
}
/******************************************************************************/
/* Comm page support                                                          */
/******************************************************************************/

ipc_port_t commpage32_handle = IPC_PORT_NULL;
ipc_port_t commpage64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage32_entry = NULL;
vm_named_entry_t commpage64_entry = NULL;
vm_map_t commpage32_map = VM_MAP_NULL;
vm_map_t commpage64_map = VM_MAP_NULL;

ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage_text32_entry = NULL;
vm_named_entry_t commpage_text64_entry = NULL;
vm_map_t commpage_text32_map = VM_MAP_NULL;
vm_map_t commpage_text64_map = VM_MAP_NULL;

user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START;
user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START;
#if defined(__i386__) || defined(__x86_64__)
/*
 * Create a memory entry, VM submap and pmap for one commpage.
 */
static void
_vm_commpage_init(
	ipc_port_t	*handlep,
	vm_map_size_t	size)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_map_t		new_map;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> _init(0x%llx)\n",
		 (long long)size));

	kr = mach_memory_entry_allocate(&mem_entry,
					handlep);
	if (kr != KERN_SUCCESS) {
		panic("_vm_commpage_init: could not allocate mem_entry");
	}
	new_map = vm_map_create(pmap_create(NULL, 0, 0), 0, size, TRUE);
	if (new_map == VM_MAP_NULL) {
		panic("_vm_commpage_init: could not allocate VM map");
	}
	mem_entry->backing.map = new_map;
	mem_entry->internal = TRUE;
	mem_entry->is_sub_map = TRUE;
	mem_entry->offset = 0;
	mem_entry->protection = VM_PROT_ALL;
	mem_entry->size = size;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: _init(0x%llx) <- %p\n",
		 (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep)));
}
#endif
/*
 * Initialize the comm text pages at boot time.
 */
extern u_int32_t random(void);
void
vm_commpage_text_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: ->init()\n"));
#if defined(__i386__) || defined(__x86_64__)
	/* create the 32-bit comm text page */
	unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restricting to 32bMAX-2PAGE */
	_vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject;
	commpage_text32_map = commpage_text32_entry->backing.map;
	commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm text page */
	offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting the slide to a 2 MB range */
	_vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject;
	commpage_text64_map = commpage_text64_entry->backing.map;
	commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);

	commpage_text_populate();
#else
#error Unknown architecture.
#endif /* __i386__ || __x86_64__ */
	/* populate the routines in here */
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: init() <-\n"));
}
/*
 * Initialize the comm pages at boot time.
 */
void
vm_commpage_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> init()\n"));

#if defined(__i386__) || defined(__x86_64__)
	/* create the 32-bit comm page */
	_vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
	commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
	commpage32_map = commpage32_entry->backing.map;

	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm page */
	_vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
	commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
	commpage64_map = commpage64_entry->backing.map;

#endif /* __i386__ || __x86_64__ */

	/* populate them according to this specific platform */
	commpage_populate();
	__commpage_setup = 1;
#if defined(__i386__) || defined(__x86_64__)
	if (__system_power_source == 0) {
		post_sys_powersource_internal(0, 1);
	}
#endif /* __i386__ || __x86_64__ */

	SHARED_REGION_TRACE_DEBUG(
		("commpage: init() <-\n"));
}
/*
 * Enter the appropriate comm page into the task's address space.
 * This is called at exec() time via vm_map_exec().
 */
kern_return_t
vm_commpage_enter(
	vm_map_t	map,
	task_t		task,
	boolean_t	is64bit)
{
	ipc_port_t		commpage_handle, commpage_text_handle;
	vm_map_offset_t		commpage_address, objc_address, commpage_text_address;
	vm_map_size_t		commpage_size, objc_size, commpage_text_size;
	int			vm_flags;
	kern_return_t		kr;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> enter(%p,%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task)));

	commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
	/* the comm page is likely to be beyond the actual end of the VM map */
	vm_flags = VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX;

	/* select the appropriate comm page for this task */
	assert(!(is64bit ^ vm_map_is_64bit(map)));
	if (is64bit) {
		commpage_handle = commpage64_handle;
		commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
		commpage_size = _COMM_PAGE64_AREA_LENGTH;
		objc_size = _COMM_PAGE64_OBJC_SIZE;
		objc_address = _COMM_PAGE64_OBJC_BASE;
		commpage_text_handle = commpage_text64_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text64_location;
	} else {
		commpage_handle = commpage32_handle;
		commpage_address =
			(vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
		commpage_size = _COMM_PAGE32_AREA_LENGTH;
		objc_size = _COMM_PAGE32_OBJC_SIZE;
		objc_address = _COMM_PAGE32_OBJC_BASE;
		commpage_text_handle = commpage_text32_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text32_location;
	}

	if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
	    (commpage_size & (pmap_nesting_size_min - 1)) == 0) {
		/* the commpage is properly aligned and sized for pmap-nesting */
		vm_flags |= VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP);
	}

	/* map the comm page in the task's address space */
	assert(commpage_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_address,
		commpage_size,
		0,
		vm_flags,
		commpage_handle,
		0,
		FALSE,
		VM_PROT_READ,
		VM_PROT_READ,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage: enter(%p,0x%llx,0x%llx) "
			 "commpage %p mapping failed 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (long long)commpage_address,
			 (long long)commpage_size,
			 (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr));
	}

	/* map the comm text page in the task's address space */
	assert(commpage_text_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_text_address,
		commpage_text_size,
		0,
		vm_flags,
		commpage_text_handle,
		0,
		FALSE,
		VM_PROT_READ|VM_PROT_EXECUTE,
		VM_PROT_READ|VM_PROT_EXECUTE,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage text: enter(%p,0x%llx,0x%llx) "
			 "commpage text %p mapping failed 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (long long)commpage_text_address,
			 (long long)commpage_text_size,
			 (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr));
	}

	/*
	 * Since we're here, we also pre-allocate some virtual space for the
	 * Objective-C run-time, if needed...
	 */
	if (objc_size != 0) {
		kr = vm_map_enter_mem_object(
			map,
			&objc_address,
			objc_size,
			0,
			VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX,
			IPC_PORT_NULL,
			0,
			FALSE,
			VM_PROT_ALL,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("commpage: enter(%p,0x%llx,0x%llx) "
				 "objc mapping failed 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (long long)objc_address,
				 (long long)objc_size, kr));
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("commpage: enter(%p,%p) <- 0x%x\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task), kr));

	return kr;
}
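
/*
 * Illustrative sketch (not compiled): the pmap-nesting eligibility test used
 * above.  (x & (granule - 1)) == 0 is the usual power-of-two alignment test:
 * with a hypothetical nesting granule of 0x20000000 (512 MB), a commpage base
 * of 0xE0000000 passes and one of 0xE0001000 does not.  The granule value is
 * an assumption for illustration; the real one is pmap_nesting_size_min.
 */
#if 0
static boolean_t
commpage_nesting_ok_example(vm_map_offset_t addr, vm_map_size_t size)
{
	vm_map_offset_t granule = 0x20000000;	/* assumed stand-in for pmap_nesting_size_min */

	return ((addr & (granule - 1)) == 0 &&
	    (size & (granule - 1)) == 0);
}
#endif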
int
vm_shared_region_slide(uint32_t slide,
	mach_vm_offset_t	entry_start_address,
	mach_vm_size_t		entry_size,
	mach_vm_offset_t	slide_start,
	mach_vm_size_t		slide_size,
	memory_object_control_t	sr_file_control)
{
	void			*slide_info_entry = NULL;
	int			error;
	vm_shared_region_t	sr;

	SHARED_REGION_TRACE_DEBUG(
		("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
		 slide, entry_start_address, entry_size, slide_start, slide_size));

	sr = vm_shared_region_get(current_task());
	if (sr == NULL) {
		printf("%s: no shared region?\n", __FUNCTION__);
		SHARED_REGION_TRACE_DEBUG(
			("vm_shared_region_slide: <- %d (no shared region)\n",
			 KERN_FAILURE));
		return KERN_FAILURE;
	}

	/*
	 * Protect from concurrent access.
	 */
	vm_shared_region_lock();
	while (sr->sr_slide_in_progress) {
		vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
	}
	if (sr->sr_slid
	    || shared_region_completed_slide
	    ) {
		vm_shared_region_unlock();

		vm_shared_region_deallocate(sr);
		printf("%s: shared region already slid?\n", __FUNCTION__);
		SHARED_REGION_TRACE_DEBUG(
			("vm_shared_region_slide: <- %d (already slid)\n",
			 KERN_FAILURE));
		return KERN_FAILURE;
	}

	sr->sr_slide_in_progress = TRUE;
	vm_shared_region_unlock();

	if ((error = vm_shared_region_slide_init(sr, slide_size, entry_start_address, entry_size, slide, sr_file_control))) {
		printf("slide_info initialization failed with kr=%d\n", error);
		goto done;
	}

	slide_info_entry = vm_shared_region_get_slide_info_entry(sr);
	if (slide_info_entry == NULL) {
		error = KERN_FAILURE;
	} else {
		error = copyin((user_addr_t)slide_start,
			       slide_info_entry,
			       (vm_size_t)slide_size);
		if (error) {
			error = KERN_INVALID_ADDRESS;
		}
	}
	if (error) {
		goto done;
	}

	if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) {
		error = KERN_INVALID_ARGUMENT;
		printf("Sanity Check failed for slide_info\n");
	} else {
#if DEBUG
		printf("Successfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n",
		       (void*)(uintptr_t)entry_start_address,
		       (unsigned long)entry_size,
		       (unsigned long)slide_size);
#endif
	}
done:
	vm_shared_region_lock();

	assert(sr->sr_slide_in_progress);
	assert(sr->sr_slid == FALSE);
	sr->sr_slide_in_progress = FALSE;
	thread_wakeup(&sr->sr_slide_in_progress);

	if (error == KERN_SUCCESS) {
		sr->sr_slid = TRUE;

		/*
		 * We don't know how to tear down a slid shared region today, because
		 * we would have to invalidate all the pages that have been slid
		 * atomically with respect to anyone mapping the shared region afresh.
		 * Therefore, take a dangling reference to prevent teardown.
		 */
		sr->sr_ref_count++;
		shared_region_completed_slide = TRUE;
	}
	vm_shared_region_unlock();

	vm_shared_region_deallocate(sr);

	SHARED_REGION_TRACE_DEBUG(
		("vm_shared_region_slide: <- %d\n",
		 error));

	return error;
}
/*
 * This is called from power management code to let the kernel know the current source of power.
 * 0 if it is external source (connected to power)
 * 1 if it is internal power source, i.e. battery
 */
void
#if defined(__i386__) || defined(__x86_64__)
post_sys_powersource(int i)
#else
post_sys_powersource(__unused int i)
#endif
{
#if defined(__i386__) || defined(__x86_64__)
	post_sys_powersource_internal(i, 0);
#endif /* __i386__ || __x86_64__ */
}


#if defined(__i386__) || defined(__x86_64__)
static void
post_sys_powersource_internal(int i, int internal)
{
	if (internal == 0)
		__system_power_source = i;

	if (__commpage_setup != 0) {
		if (__system_power_source != 0)
			commpage_set_spin_count(0);
		else
			commpage_set_spin_count(MP_SPIN_TRIES);
	}
}
#endif /* __i386__ || __x86_64__ */
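
/*
 * Illustrative sketch (not compiled): how the power-source hook above is
 * meant to be driven.  Reporting battery power (1) zeroes the commpage spin
 * count so user code stops busy-spinning, and reporting external power (0)
 * restores MP_SPIN_TRIES.  The call sites shown here are hypothetical.
 */
#if 0
static void
powersource_usage_sketch(void)
{
	post_sys_powersource(1);	/* running on battery: spin count -> 0 */
	post_sys_powersource(0);	/* back on external power: spin count -> MP_SPIN_TRIES */
}
#endif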