/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Shared region (... and comm page)
 *
 * This file handles the VM shared region and comm page.
 *
 * A shared region is a submap that contains the most common system shared
 * libraries for a given environment.
 * An environment is defined by (cpu-type, 64-bitness, root directory).
 *
 * The point of a shared region is to reduce the setup overhead when exec'ing
 * a new process.
 * A shared region uses a shared VM submap that gets mapped automatically
 * at exec() time (see vm_map_exec()).  The first process of a given
 * environment sets up the shared region and all further processes in that
 * environment can re-use that shared region without having to re-create
 * the same mappings in their VM map.  All they need is contained in the shared
 * region.
 * It can also share a pmap (mostly for read-only parts but also for the
 * initial version of some writable parts), which gets "nested" into the
 * process's pmap.  This reduces the number of soft faults: once one process
 * brings in a page in the shared region, all the other processes can access
 * it without having to enter it in their own pmap.
 *
 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
 * to map the appropriate shared region in the process's address space.
 * We look up the appropriate shared region for the process's environment.
 * If we can't find one, we create a new (empty) one and add it to the list.
 * Otherwise, we just take an extra reference on the shared region we found.
 *
 * The "dyld" runtime (mapped into the process's address space at exec() time)
 * will then use the shared_region_check_np() and shared_region_map_np()
 * system calls to validate and/or populate the shared region with the
 * appropriate dyld_shared_cache file.
 *
 * The shared region is inherited on fork() and the child simply takes an
 * extra reference on its parent's shared region.
 *
 * When the task terminates, we release a reference on its shared region.
 * When the last reference is released, we destroy the shared region.
 *
 * After a chroot(), the calling process keeps using its original shared region,
 * since that's what was mapped when it was started.  But its children
 * will use a different shared region, because they need to use the shared
 * cache that's relative to the new root directory.
 *
 * A "comm page" is an area of memory that is populated by the kernel with
 * the appropriate platform-specific version of some commonly used code.
 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
 * for the native cpu-type.  No need to overly optimize translated code
 * for hardware that is not really there!
 * The comm pages are created and populated at boot time.
 *
 * The appropriate comm page is mapped into a process's address space
 * at exec() time, in vm_map_exec().
 * It is then inherited on fork().
 *
 * The comm page is shared between the kernel and all applications of
 * a given platform.  Only the kernel can modify it.
 *
 * Applications just branch to fixed addresses in the comm page and find
 * the right version of the code for the platform.  There is also some
 * data provided and updated by the kernel for processes to retrieve easily
 * without having to do a system call.
 */
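
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how a dyld-like user-space process would drive the two system calls named
 * above.  The prototypes below are assumptions about the private user-space
 * wrappers; they are not defined or used anywhere in this file.
 */
#if 0 /* example only, never compiled */
extern int shared_region_check_np(uint64_t *start_address);
extern int shared_region_map_np(int fd, uint32_t count,
				const struct shared_file_mapping_np mappings[]);

static void
example_populate_shared_cache(int cache_fd,
			      const struct shared_file_mapping_np *mappings,
			      uint32_t mapping_count)
{
	uint64_t start_address;

	if (shared_region_check_np(&start_address) == 0) {
		/* region already populated: use the cache at start_address */
		return;
	}
	/* first process in this environment: establish the mappings */
	(void) shared_region_map_np(cache_fd, mapping_count, mappings);
}
#endif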
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_protos.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>
/* "dyld" uses this to figure out what the kernel supports */
int shared_region_version = 3;

/* trace level, output is sent to the system log file */
int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;

/* should local (non-chroot) shared regions persist when no task uses them ? */
int shared_region_persistence = 0;	/* no by default */

/* delay before reclaiming an unused shared region */
int shared_region_destroy_delay = 120;	/* in seconds */

/* indicate if the shared region has been slid. Only one region can be slid */
boolean_t shared_region_completed_slide = FALSE;

/* this lock protects all the shared region data structures */
lck_grp_t *vm_shared_region_lck_grp;
lck_mtx_t vm_shared_region_lock;

#define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
#define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
#define vm_shared_region_sleep(event, interruptible)		\
	lck_mtx_sleep(&vm_shared_region_lock,			\
		      LCK_SLEEP_DEFAULT,			\
		      (event_t) (event),			\
		      (interruptible))

/* the list of currently available shared regions (one per environment) */
queue_head_t vm_shared_region_queue;
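
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * the wait/wakeup protocol built on the macros above, as used by the
 * functions later in this file.  A thread that wants a stable view of a
 * shared region waits for "sr_mapping_in_progress" to clear while holding
 * the lock; the thread that set it wakes the waiters up when it is done.
 */
#if 0 /* example only, never compiled */
static void
example_wait_for_mapping(vm_shared_region_t shared_region)
{
	vm_shared_region_lock();
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
				       THREAD_UNINT);
	}
	/* ... inspect or modify the shared region here ... */
	vm_shared_region_unlock();
}
#endif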
static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
static vm_shared_region_t vm_shared_region_create(
	void		*root_dir,
	cpu_type_t	cputype,
	boolean_t	is_64bit);
static void vm_shared_region_destroy(vm_shared_region_t shared_region);

static void vm_shared_region_timeout(thread_call_param_t param0,
				     thread_call_param_t param1);

static int __commpage_setup = 0;
#if defined(__i386__) || defined(__x86_64__)
static int __system_power_source = 1;	/* init to external power source */
static void post_sys_powersource_internal(int i, int internal);
#endif /* __i386__ || __x86_64__ */
/*
 * Initialize the module...
 */
void
vm_shared_region_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> init\n"));

	vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region",
						      LCK_GRP_ATTR_NULL);
	lck_mtx_init(&vm_shared_region_lock,
		     vm_shared_region_lck_grp,
		     LCK_ATTR_NULL);

	queue_init(&vm_shared_region_queue);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: <- init\n"));
}
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 */
vm_shared_region_t
vm_shared_region_get(
	task_t		task)
{
	vm_shared_region_t	shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> get(%p)\n",
		 task));

	vm_shared_region_lock();
	shared_region = task->shared_region;
	assert(shared_region->sr_ref_count > 0);
	vm_shared_region_reference_locked(shared_region);
	vm_shared_region_unlock();

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: get(%p) <- %p\n",
		 task, shared_region));

	return shared_region;
}
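
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a typical caller pairs vm_shared_region_get() with
 * vm_shared_region_deallocate() to consume the extra reference.
 * "example_inspect_task_shared_region" is a hypothetical helper.
 */
#if 0 /* example only, never compiled */
static void
example_inspect_task_shared_region(task_t task)
{
	vm_shared_region_t sr;

	sr = vm_shared_region_get(task);	/* takes an extra reference */
	if (sr != NULL) {
		/* "sr" cannot be destroyed while we hold that reference */
		(void) vm_shared_region_base_address(sr);
		vm_shared_region_deallocate(sr);	/* consume the reference */
	}
}
#endif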
/*
 * Get the base address of the shared region.
 * That's the address at which it needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_offset_t
vm_shared_region_base_address(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> base_address(%p)\n",
		 shared_region));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: base_address(%p) <- 0x%llx\n",
		 shared_region, (long long)shared_region->sr_base_address));
	return shared_region->sr_base_address;
}
/*
 * Get the size of the shared region.
 * That's the size that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_size_t
vm_shared_region_size(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> size(%p)\n",
		 shared_region));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: size(%p) <- 0x%llx\n",
		 shared_region, (long long)shared_region->sr_size));
	return shared_region->sr_size;
}
/*
 * Get the memory entry of the shared region.
 * That's the "memory object" that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
ipc_port_t
vm_shared_region_mem_entry(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> mem_entry(%p)\n",
		 shared_region));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: mem_entry(%p) <- %p\n",
		 shared_region, shared_region->sr_mem_entry));
	return shared_region->sr_mem_entry;
}
/*
 * Set the shared region the process should use.
 * A NULL new shared region means that we just want to release the old
 * shared region.
 * The caller should already have an extra reference on the new shared region
 * (if any).  We release a reference on the old shared region (if any).
 */
void
vm_shared_region_set(
	task_t			task,
	vm_shared_region_t	new_shared_region)
{
	vm_shared_region_t	old_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> set(%p, %p)\n",
		 task, new_shared_region));

	vm_shared_region_lock();

	old_shared_region = task->shared_region;
	if (new_shared_region) {
		assert(new_shared_region->sr_ref_count > 0);
	}

	task->shared_region = new_shared_region;

	vm_shared_region_unlock();

	if (old_shared_region) {
		assert(old_shared_region->sr_ref_count > 0);
		vm_shared_region_deallocate(old_shared_region);
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: set(%p) <- old=%p new=%p\n",
		 task, old_shared_region, new_shared_region));
}
/*
 * Look up the shared region for the desired environment.
 * If none is found, create a new (empty) one.
 * Grab an extra reference on the returned shared region, to make sure
 * it doesn't get destroyed before the caller is done with it.  The caller
 * is responsible for consuming that extra reference if necessary.
 */
vm_shared_region_t
vm_shared_region_lookup(
	void		*root_dir,
	cpu_type_t	cputype,
	boolean_t	is_64bit)
{
	vm_shared_region_t	shared_region;
	vm_shared_region_t	new_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> lookup(root=%p,cpu=%d,64bit=%d)\n",
		 root_dir, cputype, is_64bit));

	shared_region = NULL;
	new_shared_region = NULL;

	vm_shared_region_lock();
	for (;;) {
		queue_iterate(&vm_shared_region_queue,
			      shared_region,
			      vm_shared_region_t,
			      sr_q) {
			assert(shared_region->sr_ref_count > 0);
			if (shared_region->sr_cpu_type == cputype &&
			    shared_region->sr_root_dir == root_dir &&
			    shared_region->sr_64bit == is_64bit) {
				/* found a match ! */
				vm_shared_region_reference_locked(shared_region);
				goto done;
			}
		}
		if (new_shared_region == NULL) {
			/* no match: create a new one */
			vm_shared_region_unlock();
			new_shared_region = vm_shared_region_create(root_dir,
								    cputype,
								    is_64bit);
			/* do the lookup again, in case we lost a race */
			vm_shared_region_lock();
		} else {
			/* still no match: use our new one */
			shared_region = new_shared_region;
			new_shared_region = NULL;
			queue_enter(&vm_shared_region_queue,
				    shared_region,
				    vm_shared_region_t,
				    sr_q);
			break;
		}
	}

done:
	vm_shared_region_unlock();

	if (new_shared_region) {
		/*
		 * We lost a race with someone else to create a new shared
		 * region for that environment.  Get rid of our unused one.
		 */
		assert(new_shared_region->sr_ref_count == 1);
		new_shared_region->sr_ref_count--;
		vm_shared_region_destroy(new_shared_region);
		new_shared_region = NULL;
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: lookup(root=%p,cpu=%d,64bit=%d) <- %p\n",
		 root_dir, cputype, is_64bit, shared_region));

	assert(shared_region->sr_ref_count > 0);
	return shared_region;
}
/*
 * Take an extra reference on a shared region.
 * The vm_shared_region_lock should already be held by the caller.
 */
static void
vm_shared_region_reference_locked(
	vm_shared_region_t	shared_region)
{
	lck_mtx_assert(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> reference_locked(%p)\n",
		 shared_region));
	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count++;

	if (shared_region->sr_timer_call != NULL) {
		boolean_t cancelled;

		/* cancel and free any pending timeout */
		cancelled = thread_call_cancel(shared_region->sr_timer_call);
		if (cancelled) {
			thread_call_free(shared_region->sr_timer_call);
			shared_region->sr_timer_call = NULL;
			/* release the reference held by the cancelled timer */
			shared_region->sr_ref_count--;
		} else {
			/* the timer will drop the reference and free itself */
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: reference_locked(%p) <- %d\n",
		 shared_region, shared_region->sr_ref_count));
}
/*
 * Release a reference on the shared region.
 * Destroy it if there are no references left.
 */
void
vm_shared_region_deallocate(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> deallocate(%p)\n",
		 shared_region));

	vm_shared_region_lock();

	assert(shared_region->sr_ref_count > 0);

	if (shared_region->sr_root_dir == NULL) {
		/*
		 * Local (i.e. based on the boot volume) shared regions
		 * can persist or not based on the "shared_region_persistence"
		 * sysctl.
		 * Make sure that this one complies.
		 */
		if (shared_region_persistence &&
		    !shared_region->sr_persists) {
			/* make this one persistent */
			shared_region->sr_ref_count++;
			shared_region->sr_persists = TRUE;
		} else if (!shared_region_persistence &&
			   shared_region->sr_persists) {
			/* make this one no longer persistent */
			assert(shared_region->sr_ref_count > 1);
			shared_region->sr_ref_count--;
			shared_region->sr_persists = FALSE;
		}
	}

	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count--;
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p): ref now %d\n",
		 shared_region, shared_region->sr_ref_count));

	if (shared_region->sr_ref_count == 0) {
		uint64_t deadline;

		if (shared_region->sr_timer_call == NULL) {
			/* hold one reference for the timer */
			assert(! shared_region->sr_mapping_in_progress);
			shared_region->sr_ref_count++;

			/* set up the timer */
			shared_region->sr_timer_call = thread_call_allocate(
				(thread_call_func_t) vm_shared_region_timeout,
				(thread_call_param_t) shared_region);

			/* schedule the timer */
			clock_interval_to_deadline(shared_region_destroy_delay,
						   1000 * 1000 * 1000,
						   &deadline);
			thread_call_enter_delayed(shared_region->sr_timer_call,
						  deadline);

			SHARED_REGION_TRACE_DEBUG(
				("shared_region: deallocate(%p): armed timer\n",
				 shared_region));

			vm_shared_region_unlock();
		} else {
			/* timer expired: let go of this shared region */

			/*
			 * Remove it from the queue first, so no one can find
			 * it...
			 */
			queue_remove(&vm_shared_region_queue,
				     shared_region,
				     vm_shared_region_t,
				     sr_q);
			vm_shared_region_unlock();

			/* ... and destroy it */
			vm_shared_region_destroy(shared_region);
			shared_region = NULL;
		}
	} else {
		vm_shared_region_unlock();
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p) <-\n",
		 shared_region));
}
static void
vm_shared_region_timeout(
	thread_call_param_t	param0,
	__unused thread_call_param_t	param1)
{
	vm_shared_region_t	shared_region;

	shared_region = (vm_shared_region_t) param0;

	vm_shared_region_deallocate(shared_region);
}
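
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how the delayed-destroy timer interacts with the reference count.  When
 * the last caller reference is dropped, vm_shared_region_deallocate() arms
 * a timer that holds one reference for "shared_region_destroy_delay"
 * seconds; a lookup within that window cancels the timer (see
 * vm_shared_region_reference_locked()) and re-uses the region.
 */
#if 0 /* example only, never compiled */
static void
example_release_and_maybe_reuse(void *root_dir, cpu_type_t cputype,
				boolean_t is_64bit)
{
	vm_shared_region_t sr;

	sr = vm_shared_region_lookup(root_dir, cputype, is_64bit);
	/* ... use the region ... */
	vm_shared_region_deallocate(sr);	/* last ref: destroy timer armed */

	/* a lookup before the timeout cancels the timer and revives the region */
	sr = vm_shared_region_lookup(root_dir, cputype, is_64bit);
	vm_shared_region_deallocate(sr);
}
#endif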
/*
 * Create a new (empty) shared region for a new environment.
 */
static vm_shared_region_t
vm_shared_region_create(
	void			*root_dir,
	cpu_type_t		cputype,
	boolean_t		is_64bit)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	ipc_port_t		mem_entry_port;
	vm_shared_region_t	shared_region;
	vm_map_t		sub_map;
	mach_vm_offset_t	base_address, pmap_nesting_start;
	mach_vm_size_t		size, pmap_nesting_size;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> create(root=%p,cpu=%d,64bit=%d)\n",
		 root_dir, cputype, is_64bit));

	base_address = 0;
	size = 0;
	mem_entry_port = IPC_PORT_NULL;
	sub_map = VM_MAP_NULL;

	/* create a new shared region structure... */
	shared_region = kalloc(sizeof (*shared_region));
	if (shared_region == NULL) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: couldn't allocate\n"));
		goto done;
	}

	/* figure out the correct settings for the desired environment */
	if (is_64bit) {
		switch (cputype) {
		case CPU_TYPE_I386:
			base_address = SHARED_REGION_BASE_X86_64;
			size = SHARED_REGION_SIZE_X86_64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC64;
			size = SHARED_REGION_SIZE_PPC64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
			break;
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				 cputype));
			kfree(shared_region, sizeof (*shared_region));
			shared_region = NULL;
			goto done;
		}
	} else {
		switch (cputype) {
		case CPU_TYPE_I386:
			base_address = SHARED_REGION_BASE_I386;
			size = SHARED_REGION_SIZE_I386;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC;
			size = SHARED_REGION_SIZE_PPC;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
			break;
#ifdef CPU_TYPE_ARM
		case CPU_TYPE_ARM:
			base_address = SHARED_REGION_BASE_ARM;
			size = SHARED_REGION_SIZE_ARM;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
			break;
#endif /* CPU_TYPE_ARM */
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				 cputype));
			kfree(shared_region, sizeof (*shared_region));
			shared_region = NULL;
			goto done;
		}
	}

	/* create a memory entry structure and a Mach port handle */
	kr = mach_memory_entry_allocate(&mem_entry,
					&mem_entry_port);
	if (kr != KERN_SUCCESS) {
		kfree(shared_region, sizeof (*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: "
			 "couldn't allocate mem_entry\n"));
		goto done;
	}

	/* create a VM sub map and its pmap */
	sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit),
				0, size,
				TRUE);
	if (sub_map == VM_MAP_NULL) {
		ipc_port_release_send(mem_entry_port);
		kfree(shared_region, sizeof (*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: "
			 "couldn't allocate map\n"));
		goto done;
	}

	/* make the memory entry point to the VM sub map */
	mem_entry->is_sub_map = TRUE;
	mem_entry->backing.map = sub_map;
	mem_entry->size = size;
	mem_entry->protection = VM_PROT_ALL;

	/* make the shared region point at the memory entry */
	shared_region->sr_mem_entry = mem_entry_port;

	/* fill in the shared region's environment and settings */
	shared_region->sr_base_address = base_address;
	shared_region->sr_size = size;
	shared_region->sr_pmap_nesting_start = pmap_nesting_start;
	shared_region->sr_pmap_nesting_size = pmap_nesting_size;
	shared_region->sr_cpu_type = cputype;
	shared_region->sr_64bit = is_64bit;
	shared_region->sr_root_dir = root_dir;

	queue_init(&shared_region->sr_q);
	shared_region->sr_mapping_in_progress = FALSE;
	shared_region->sr_persists = FALSE;
	shared_region->sr_timer_call = NULL;
	shared_region->sr_first_mapping = (mach_vm_offset_t) -1;

	/* grab a reference for the caller */
	shared_region->sr_ref_count = 1;

done:
	if (shared_region) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=%d,64bit=%d,"
			 "base=0x%llx,size=0x%llx) <- "
			 "%p mem=(%p,%p) map=%p pmap=%p\n",
			 root_dir, cputype, is_64bit, (long long)base_address,
			 (long long)size, shared_region,
			 mem_entry_port, mem_entry, sub_map, sub_map->pmap));
	} else {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=%d,64bit=%d,"
			 "base=0x%llx,size=0x%llx) <- NULL",
			 root_dir, cputype, is_64bit, (long long)base_address,
			 (long long)size));
	}
	return shared_region;
}
/*
 * Destroy a now-unused shared region.
 * The shared region is no longer in the queue and can not be looked up.
 */
static void
vm_shared_region_destroy(
	vm_shared_region_t	shared_region)
{
	vm_named_entry_t	mem_entry;
	vm_map_t		map;

	SHARED_REGION_TRACE_INFO(
		("shared_region: -> destroy(%p) (root=%p,cpu=%d,64bit=%d)\n",
		 shared_region,
		 shared_region->sr_root_dir,
		 shared_region->sr_cpu_type,
		 shared_region->sr_64bit));

	assert(shared_region->sr_ref_count == 0);
	assert(!shared_region->sr_persists);

	mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
	assert(mem_entry->is_sub_map);
	assert(!mem_entry->internal);
	assert(!mem_entry->is_pager);
	map = mem_entry->backing.map;

	/*
	 * Clean up the pmap first.  The virtual addresses that were
	 * entered in this possibly "nested" pmap may have different values
	 * than the VM map's min and max offsets, if the VM sub map was
	 * mapped at a non-zero offset in the processes' main VM maps, which
	 * is usually the case, so the clean-up we do in vm_map_destroy() would
	 * not be enough.
	 */
	if (map->pmap) {
		pmap_remove(map->pmap,
			    shared_region->sr_base_address,
			    (shared_region->sr_base_address +
			     shared_region->sr_size));
	}

	/*
	 * Release our (one and only) handle on the memory entry.
	 * This will generate a no-senders notification, which will be processed
	 * by ipc_kobject_notify(), which will release the one and only
	 * reference on the memory entry and cause it to be destroyed, along
	 * with the VM sub map and its pmap.
	 */
	mach_memory_entry_port_release(shared_region->sr_mem_entry);
	mem_entry = NULL;
	shared_region->sr_mem_entry = IPC_PORT_NULL;

	if (shared_region->sr_timer_call) {
		thread_call_free(shared_region->sr_timer_call);
	}

	if ((slide_info.slide_info_entry != NULL) && (slide_info.sr == shared_region)) {
		kmem_free(kernel_map,
			  (vm_offset_t) slide_info.slide_info_entry,
			  (vm_size_t) slide_info.slide_info_size);
		vm_object_deallocate(slide_info.slide_object);
		slide_info.slide_object = NULL;
		slide_info.start = 0;
		slide_info.end = 0;
		slide_info.slide = 0;
		slide_info.sr = NULL;
		slide_info.slide_info_entry = NULL;
		slide_info.slide_info_size = 0;
		shared_region_completed_slide = FALSE;
	}

	/* release the shared region structure... */
	kfree(shared_region, sizeof (*shared_region));

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: destroy(%p) <-\n",
		 shared_region));
	shared_region = NULL;
}
/*
 * Gets the address of the first (in time) mapping in the shared region.
 */
kern_return_t
vm_shared_region_start_address(
	vm_shared_region_t	shared_region,
	mach_vm_offset_t	*start_address)
{
	kern_return_t		kr;
	mach_vm_offset_t	sr_base_address;
	mach_vm_offset_t	sr_first_mapping;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> start_address(%p)\n",
		 shared_region));
	assert(shared_region->sr_ref_count > 1);

	vm_shared_region_lock();

	/*
	 * Wait if there's another thread establishing a mapping
	 * in this shared region right when we're looking at it.
	 * We want a consistent view of the map...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		assert(shared_region->sr_ref_count > 1);
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
				       THREAD_UNINT);
	}
	assert(! shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);

	sr_base_address = shared_region->sr_base_address;
	sr_first_mapping = shared_region->sr_first_mapping;

	if (sr_first_mapping == (mach_vm_offset_t) -1) {
		/* shared region is empty */
		kr = KERN_INVALID_ADDRESS;
	} else {
		kr = KERN_SUCCESS;
		*start_address = sr_base_address + sr_first_mapping;
	}

	vm_shared_region_unlock();

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: start_address(%p) <- 0x%llx\n",
		 shared_region, (long long)shared_region->sr_base_address));

	return kr;
}
void
vm_shared_region_undo_mappings(
	vm_map_t			sr_map,
	mach_vm_offset_t		sr_base_address,
	struct shared_file_mapping_np	*mappings,
	unsigned int			mappings_count)
{
	unsigned int		j;
	vm_shared_region_t	shared_region = NULL;
	boolean_t		reset_shared_region_state = FALSE;

	shared_region = vm_shared_region_get(current_task());
	if (shared_region == NULL) {
		printf("Failed to undo mappings because of NULL shared region.\n");
		return;
	}

	if (sr_map == NULL) {
		ipc_port_t		sr_handle;
		vm_named_entry_t	sr_mem_entry;

		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);

		while (shared_region->sr_mapping_in_progress) {
			/* wait for our turn... */
			vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
					       THREAD_UNINT);
		}
		assert(! shared_region->sr_mapping_in_progress);
		assert(shared_region->sr_ref_count > 1);
		/* let others know we're working in this shared region */
		shared_region->sr_mapping_in_progress = TRUE;

		vm_shared_region_unlock();

		reset_shared_region_state = TRUE;

		/* no need to lock because this data is never modified... */
		sr_handle = shared_region->sr_mem_entry;
		sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
		sr_map = sr_mem_entry->backing.map;
		sr_base_address = shared_region->sr_base_address;
	}

	/*
	 * Undo the mappings we've established so far.
	 */
	for (j = 0; j < mappings_count; j++) {
		kern_return_t kr2;

		if (mappings[j].sfm_size == 0) {
			/*
			 * We didn't establish this
			 * mapping, so nothing to undo.
			 */
			continue;
		}
		SHARED_REGION_TRACE_INFO(
			("shared_region: mapping[%d]: "
			 "address:0x%016llx size:0x%016llx offset:0x%016llx "
			 "maxprot:0x%x prot:0x%x: "
			 "undoing...\n",
			 j,
			 (long long)mappings[j].sfm_address,
			 (long long)mappings[j].sfm_size,
			 (long long)mappings[j].sfm_file_offset,
			 mappings[j].sfm_max_prot,
			 mappings[j].sfm_init_prot));
		kr2 = mach_vm_deallocate(
			sr_map,
			(mappings[j].sfm_address -
			 sr_base_address),
			mappings[j].sfm_size);
		assert(kr2 == KERN_SUCCESS);
	}

	/*
	 * This is how check_np() knows if the shared region
	 * is mapped. So clear it here.
	 */
	shared_region->sr_first_mapping = (mach_vm_offset_t) -1;

	if (reset_shared_region_state) {
		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);
		assert(shared_region->sr_mapping_in_progress);
		/* we're done working on that shared region */
		shared_region->sr_mapping_in_progress = FALSE;
		thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
		vm_shared_region_unlock();
		reset_shared_region_state = FALSE;
	}

	vm_shared_region_deallocate(shared_region);
}
/*
 * Establish some mappings of a file in the shared region.
 * This is used by "dyld" via the shared_region_map_np() system call
 * to populate the shared region with the appropriate shared cache.
 *
 * One could also call it several times to incrementally load several
 * libraries, as long as they do not overlap.
 * It will return KERN_SUCCESS if the mappings were successfully established
 * or if they were already established identically by another process.
 */
kern_return_t
vm_shared_region_map_file(
	vm_shared_region_t		shared_region,
	unsigned int			mappings_count,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	void				*root_dir,
	struct shared_file_mapping_np	*mapping_to_slide)
{
	kern_return_t		kr;
	vm_object_t		file_object;
	ipc_port_t		sr_handle;
	vm_named_entry_t	sr_mem_entry;
	vm_map_t		sr_map;
	mach_vm_offset_t	sr_base_address;
	unsigned int		i;
	mach_port_t		map_port;
	vm_map_offset_t		target_address;
	vm_object_t		object;
	vm_object_size_t	obj_size;
	boolean_t		found_mapping_to_slide = FALSE;

	kr = KERN_SUCCESS;

	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);

	if (shared_region->sr_root_dir != root_dir) {
		/*
		 * This shared region doesn't match the current root
		 * directory of this process.  Deny the mapping to
		 * avoid tainting the shared region with something that
		 * doesn't quite belong into it.
		 */
		vm_shared_region_unlock();
		kr = KERN_PROTECTION_FAILURE;
		goto done;
	}

	/*
	 * Make sure we handle only one mapping at a time in a given
	 * shared region, to avoid race conditions.  This should not
	 * happen frequently...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
				       THREAD_UNINT);
	}
	assert(! shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);
	/* let others know we're working in this shared region */
	shared_region->sr_mapping_in_progress = TRUE;

	vm_shared_region_unlock();

	/* no need to lock because this data is never modified... */
	sr_handle = shared_region->sr_mem_entry;
	sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
	sr_map = sr_mem_entry->backing.map;
	sr_base_address = shared_region->sr_base_address;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
		 shared_region, mappings_count, mappings,
		 file_control, file_size));

	/* get the VM object associated with the file to be mapped */
	file_object = memory_object_control_to_vm_object(file_control);

	/* establish the mappings */
	for (i = 0; i < mappings_count; i++) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: mapping[%d]: "
			 "address:0x%016llx size:0x%016llx offset:0x%016llx "
			 "maxprot:0x%x prot:0x%x\n",
			 i,
			 (long long)mappings[i].sfm_address,
			 (long long)mappings[i].sfm_size,
			 (long long)mappings[i].sfm_file_offset,
			 mappings[i].sfm_max_prot,
			 mappings[i].sfm_init_prot));

		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			/* zero-filled memory */
			map_port = MACH_PORT_NULL;
		} else {
			/* file-backed memory */
			map_port = (ipc_port_t) file_object->pager;
		}

		if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) {
			/*
			 * This is the mapping that needs to be slid.
			 */
			if (found_mapping_to_slide == TRUE) {
				SHARED_REGION_TRACE_INFO(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x "
					 "will not be slid as only one such mapping is allowed...\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot));
			} else {
				if (mapping_to_slide != NULL) {
					mapping_to_slide->sfm_file_offset = mappings[i].sfm_file_offset;
					mapping_to_slide->sfm_size = mappings[i].sfm_size;
					found_mapping_to_slide = TRUE;
				}
			}
		}

		/* mapping's address is relative to the shared region base */
		target_address =
			mappings[i].sfm_address - sr_base_address;

		/* establish that mapping, OK if it's "already" there */
		if (map_port == MACH_PORT_NULL) {
			/*
			 * We want to map some anonymous memory in a
			 * shared region.
			 * We have to create the VM object now, so that it
			 * can be mapped "copy-on-write".
			 */
			obj_size = vm_map_round_page(mappings[i].sfm_size);
			object = vm_object_allocate(obj_size);
			if (object == VM_OBJECT_NULL) {
				kr = KERN_RESOURCE_SHORTAGE;
			} else {
				kr = vm_map_enter(
					sr_map,
					&target_address,
					vm_map_round_page(mappings[i].sfm_size),
					0,
					VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
					object,
					0,
					TRUE,
					mappings[i].sfm_init_prot & VM_PROT_ALL,
					mappings[i].sfm_max_prot & VM_PROT_ALL,
					VM_INHERIT_DEFAULT);
			}
		} else {
			object = VM_OBJECT_NULL; /* no anonymous memory here */
			kr = vm_map_enter_mem_object(
				sr_map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size),
				0,
				VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				mappings[i].sfm_init_prot & VM_PROT_ALL,
				mappings[i].sfm_max_prot & VM_PROT_ALL,
				VM_INHERIT_DEFAULT);
		}

		if (kr != KERN_SUCCESS) {
			if (map_port == MACH_PORT_NULL) {
				/*
				 * Get rid of the VM object we just created
				 * but failed to map.
				 */
				vm_object_deallocate(object);
				object = VM_OBJECT_NULL;
			}
			if (kr == KERN_MEMORY_PRESENT) {
				/*
				 * This exact mapping was already there:
				 * that's fine.
				 */
				SHARED_REGION_TRACE_INFO(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x "
					 "already mapped...\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot));
				/*
				 * We didn't establish this mapping ourselves;
				 * let's reset its size, so that we do not
				 * attempt to undo it if an error occurs later.
				 */
				mappings[i].sfm_size = 0;
				kr = KERN_SUCCESS;
			} else {
				/* this mapping failed ! */
				SHARED_REGION_TRACE_ERROR(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x failed 0x%x\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot,
					 kr));

				vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i);
				break;
			}
		}

		/*
		 * Record the first (chronologically) mapping in
		 * this shared region.
		 * We're protected by "sr_mapping_in_progress" here,
		 * so no need to lock "shared_region".
		 */
		if (shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
			shared_region->sr_first_mapping = target_address;
		}
	}

	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);
	assert(shared_region->sr_mapping_in_progress);
	/* we're done working on that shared region */
	shared_region->sr_mapping_in_progress = FALSE;
	thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
	vm_shared_region_unlock();

done:
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n",
		 shared_region, mappings_count, mappings,
		 file_control, file_size, kr));
	return kr;
}
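
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * what a mappings array handed to vm_shared_region_map_file() looks like.
 * The addresses are absolute (they get rebased against sr_base_address
 * above); VM_PROT_ZF marks zero-fill mappings and VM_PROT_SLIDE marks the
 * single mapping that may be slid.  The sizes and offsets are made-up values.
 */
#if 0 /* example only, never compiled */
static void
example_fill_mappings(struct shared_file_mapping_np m[3],
		      mach_vm_offset_t sr_base)
{
	/* read-only, executable text from the start of the cache file */
	m[0].sfm_address = sr_base;
	m[0].sfm_size = 0x10000000;
	m[0].sfm_file_offset = 0;
	m[0].sfm_max_prot = VM_PROT_READ | VM_PROT_EXECUTE;
	m[0].sfm_init_prot = VM_PROT_READ | VM_PROT_EXECUTE;

	/* writable data, eligible for sliding */
	m[1].sfm_address = sr_base + 0x10000000;
	m[1].sfm_size = 0x02000000;
	m[1].sfm_file_offset = 0x10000000;
	m[1].sfm_max_prot = VM_PROT_READ | VM_PROT_WRITE;
	m[1].sfm_init_prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_SLIDE;

	/* zero-filled tail, not backed by the file */
	m[2].sfm_address = sr_base + 0x12000000;
	m[2].sfm_size = 0x00100000;
	m[2].sfm_file_offset = 0;
	m[2].sfm_max_prot = VM_PROT_READ | VM_PROT_WRITE;
	m[2].sfm_init_prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_ZF;
}
#endif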
/*
 * Enter the appropriate shared region into "map" for "task".
 * This involves looking up the shared region (and possibly creating a new
 * one) for the desired environment, then mapping the VM sub map into the
 * task's VM "map", with the appropriate level of pmap-nesting.
 */
kern_return_t
vm_shared_region_enter(
	struct _vm_map		*map,
	struct task		*task,
	void			*fsroot,
	cpu_type_t		cpu)
{
	kern_return_t		kr;
	vm_shared_region_t	shared_region;
	vm_map_offset_t		sr_address, sr_offset, target_address;
	vm_map_size_t		sr_size, mapping_size;
	vm_map_offset_t		sr_pmap_nesting_start;
	vm_map_size_t		sr_pmap_nesting_size;
	ipc_port_t		sr_handle;
	boolean_t		is_64bit;

	is_64bit = task_has_64BitAddr(task);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> "
		 "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d)\n",
		 map, task, fsroot, cpu, is_64bit));

	/* lookup (create if needed) the shared region for this environment */
	shared_region = vm_shared_region_lookup(fsroot, cpu, is_64bit);
	if (shared_region == NULL) {
		/* this should not happen ! */
		SHARED_REGION_TRACE_ERROR(
			("shared_region: -> "
			 "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d): "
			 "lookup failed !\n",
			 map, task, fsroot, cpu, is_64bit));
		//panic("shared_region_enter: lookup failed\n");
		return KERN_FAILURE;
	}

	/* let the task use that shared region */
	vm_shared_region_set(task, shared_region);

	/* no need to lock since this data is never modified */
	sr_address = shared_region->sr_base_address;
	sr_size = shared_region->sr_size;
	sr_handle = shared_region->sr_mem_entry;
	sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start;
	sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size;

	/*
	 * Start mapping the shared region's VM sub map into the task's VM map.
	 */
	sr_offset = 0;

	if (sr_pmap_nesting_start > sr_address) {
		/* we need to map a range without pmap-nesting first */
		target_address = sr_address;
		mapping_size = sr_pmap_nesting_start - sr_address;
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			sr_handle,
			sr_offset,
			TRUE,
			VM_PROT_READ,
			VM_PROT_ALL,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 map, task, fsroot, cpu, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size, sr_handle, kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d): "
			 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 map, task, fsroot, cpu, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 sr_handle, kr));
		sr_offset += mapping_size;
		sr_size -= mapping_size;
	}
	/*
	 * We may need to map several pmap-nested portions, due to platform
	 * specific restrictions on pmap nesting.
	 * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias...
	 */
	for (;
	     sr_pmap_nesting_size > 0;
	     sr_offset += mapping_size,
		     sr_size -= mapping_size,
		     sr_pmap_nesting_size -= mapping_size) {
		target_address = sr_address + sr_offset;
		mapping_size = sr_pmap_nesting_size;
		if (mapping_size > pmap_nesting_size_max) {
			mapping_size = (vm_map_offset_t) pmap_nesting_size_max;
		}
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			(VM_FLAGS_FIXED | VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP)),
			sr_handle,
			sr_offset,
			TRUE,
			VM_PROT_READ,
			VM_PROT_ALL,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 map, task, fsroot, cpu, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size, sr_handle, kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d): "
			 "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 map, task, fsroot, cpu, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 sr_handle, kr));
	}
	if (sr_size > 0) {
		/* and there's some left to be mapped without pmap-nesting */
		target_address = sr_address + sr_offset;
		mapping_size = sr_size;
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			sr_handle,
			sr_offset,
			TRUE,
			VM_PROT_READ,
			VM_PROT_ALL,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 map, task, fsroot, cpu, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size, sr_handle, kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d): "
			 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 map, task, fsroot, cpu, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 sr_handle, kr));
		sr_offset += mapping_size;
		sr_size -= mapping_size;
	}
	assert(sr_size == 0);

done:
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: enter(%p,%p,%p,%d,%d) <- 0x%x\n",
		 map, task, fsroot, cpu, is_64bit, kr));
	return kr;
}
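
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * the mapping layout computed by vm_shared_region_enter() above.  For a
 * region based at sr_address with a pmap-nested window starting at
 * sr_pmap_nesting_start, the sub map is entered as: an optional non-nested
 * prefix, one or more nested chunks no larger than pmap_nesting_size_max,
 * and an optional non-nested remainder.
 */
#if 0 /* example only, never compiled */
static void
example_print_enter_layout(vm_map_offset_t sr_address,
			   vm_map_size_t sr_size,
			   vm_map_offset_t sr_pmap_nesting_start,
			   vm_map_size_t sr_pmap_nesting_size,
			   vm_map_size_t nesting_size_max)
{
	vm_map_offset_t offset = 0;
	vm_map_size_t chunk;

	if (sr_pmap_nesting_start > sr_address) {
		chunk = sr_pmap_nesting_start - sr_address;
		printf("non-nested: 0x%llx..0x%llx\n",
		       (long long)sr_address, (long long)(sr_address + chunk));
		offset += chunk;
		sr_size -= chunk;
	}
	while (sr_pmap_nesting_size > 0) {
		chunk = (sr_pmap_nesting_size > nesting_size_max) ?
			nesting_size_max : sr_pmap_nesting_size;
		printf("nested:     0x%llx..0x%llx\n",
		       (long long)(sr_address + offset),
		       (long long)(sr_address + offset + chunk));
		offset += chunk;
		sr_size -= chunk;
		sr_pmap_nesting_size -= chunk;
	}
	if (sr_size > 0) {
		printf("non-nested: 0x%llx..0x%llx\n",
		       (long long)(sr_address + offset),
		       (long long)(sr_address + offset + sr_size));
	}
}
#endif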
#define SANE_SLIDE_INFO_SIZE		(1024*1024) /* can be changed if needed */
struct vm_shared_region_slide_info	slide_info;

kern_return_t
vm_shared_region_sliding_valid(uint32_t slide) {

	kern_return_t kr = KERN_SUCCESS;

	if ((shared_region_completed_slide == TRUE) && slide) {
		if (slide != slide_info.slide) {
			printf("Only one shared region can be slid\n");
			kr = KERN_FAILURE;
		} else if (slide == slide_info.slide) {
			/*
			 * Request for sliding when we've
			 * already done it with exactly the
			 * same slide value before.
			 * This isn't wrong technically but
			 * we don't want to slide again and
			 * so we return this value.
			 */
			kr = KERN_INVALID_ARGUMENT;
		}
	}
	return kr;
}
kern_return_t
vm_shared_region_slide_init(
		mach_vm_size_t		slide_info_size,
		mach_vm_offset_t	start,
		mach_vm_size_t		size,
		uint32_t		slide,
		memory_object_control_t	sr_file_control)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_object_t object = VM_OBJECT_NULL;
	vm_object_offset_t offset = 0;

	vm_map_t map = NULL, cur_map = NULL;
	boolean_t is_map_locked = FALSE;

	if ((kr = vm_shared_region_sliding_valid(slide)) != KERN_SUCCESS) {
		if (kr == KERN_INVALID_ARGUMENT) {
			/*
			 * This will happen if we request sliding again
			 * with the same slide value that was used earlier
			 * for the very first sliding.
			 */
			kr = KERN_SUCCESS;
		}
		return kr;
	}

	if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
		printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
		kr = KERN_FAILURE;
		return kr;
	}

	if (sr_file_control != MEMORY_OBJECT_CONTROL_NULL) {

		object = memory_object_control_to_vm_object(sr_file_control);
		vm_object_reference(object);
		offset = start;

		vm_object_lock_shared(object);

	} else {
		/*
		 * Remove this entire "else" block and all "map" references
		 * once we get rid of the shared_region_slide_np()
		 * system call.
		 */
		vm_map_entry_t entry = VM_MAP_ENTRY_NULL;
		map = current_map();
		vm_map_lock_read(map);
		is_map_locked = TRUE;
	Retry:
		cur_map = map;
		if(!vm_map_lookup_entry(map, start, &entry)) {
			kr = KERN_INVALID_ARGUMENT;
		} else {
			vm_object_t shadow_obj = VM_OBJECT_NULL;

			if (entry->is_sub_map == TRUE) {
				map = entry->object.sub_map;
				start -= entry->vme_start;
				start += entry->offset;
				vm_map_lock_read(map);
				vm_map_unlock_read(cur_map);
				goto Retry;
			} else {
				object = entry->object.vm_object;
				offset = (start - entry->vme_start) + entry->offset;
			}

			vm_object_lock_shared(object);
			while (object->shadow != VM_OBJECT_NULL) {
				shadow_obj = object->shadow;
				vm_object_lock_shared(shadow_obj);
				vm_object_unlock(object);
				object = shadow_obj;
			}
		}
	}

	if (object->internal == TRUE) {
		kr = KERN_INVALID_ADDRESS;
	} else {
		kr = kmem_alloc(kernel_map,
				(vm_offset_t *) &slide_info.slide_info_entry,
				(vm_size_t) slide_info_size);
		if (kr == KERN_SUCCESS) {
			slide_info.slide_info_size = slide_info_size;
			slide_info.slide_object = object;
			slide_info.start = offset;
			slide_info.end = slide_info.start + size;
			slide_info.slide = slide;
			slide_info.sr = vm_shared_region_get(current_task());
			/*
			 * We want to keep the above reference on the shared region
			 * because we have a pointer to it in the slide_info.
			 *
			 * If we want to have this region get deallocated/freed
			 * then we will have to make sure that we msync(..MS_INVALIDATE..)
			 * the pages associated with this shared region. Those pages would
			 * have been slid with an older slide value.
			 *
			 * vm_shared_region_deallocate(slide_info.sr);
			 */
			shared_region_completed_slide = TRUE;
		}
	}
	vm_object_unlock(object);

	if (is_map_locked == TRUE) {
		vm_map_unlock_read(map);
	}
	return kr;
}
void*
vm_shared_region_get_slide_info(void) {
	return (void*)&slide_info;
}

void*
vm_shared_region_get_slide_info_entry(void) {
	return (void*)slide_info.slide_info_entry;
}
kern_return_t
vm_shared_region_slide_sanity_check(void)
{
	uint32_t pageIndex = 0;
	uint16_t entryIndex = 0;
	uint16_t *toc = NULL;
	vm_shared_region_slide_info_entry_t s_info;
	kern_return_t kr;

	s_info = vm_shared_region_get_slide_info_entry();
	toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);

	kr = mach_vm_protect(kernel_map,
			     (mach_vm_offset_t)(vm_offset_t) slide_info.slide_info_entry,
			     (mach_vm_size_t) slide_info.slide_info_size,
			     VM_PROT_READ, TRUE);
	if (kr != KERN_SUCCESS) {
		panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr);
	}

	for (; pageIndex < s_info->toc_count; pageIndex++) {

		entryIndex = (uint16_t)(toc[pageIndex]);

		if (entryIndex >= s_info->entry_count) {
			printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
			goto fail;
		}
	}
	return KERN_SUCCESS;

fail:
	if (slide_info.slide_info_entry != NULL) {
		kmem_free(kernel_map,
			  (vm_offset_t) slide_info.slide_info_entry,
			  (vm_size_t) slide_info.slide_info_size);
		vm_object_deallocate(slide_info.slide_object);
		slide_info.slide_object = NULL;
		slide_info.start = 0;
		slide_info.end = 0;
		slide_info.slide = 0;
		slide_info.slide_info_entry = NULL;
		slide_info.slide_info_size = 0;
		shared_region_completed_slide = FALSE;
	}
	return KERN_FAILURE;
}
kern_return_t
vm_shared_region_slide(vm_offset_t vaddr, uint32_t pageIndex)
{
	uint16_t *toc = NULL;
	slide_info_entry_toc_t bitmap = NULL;
	uint32_t i = 0, j = 0;
	uint8_t b = 0;
	uint32_t slide = slide_info.slide;
	int is_64 = task_has_64BitAddr(current_task());

	vm_shared_region_slide_info_entry_t s_info = vm_shared_region_get_slide_info_entry();
	toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);

	if (pageIndex >= s_info->toc_count) {
		printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count);
	} else {
		uint16_t entryIndex = (uint16_t)(toc[pageIndex]);
		slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset);

		if (entryIndex >= s_info->entry_count) {
			printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count);
		} else {
			bitmap = &slide_info_entries[entryIndex];

			for (i = 0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) {
				b = bitmap->entry[i];
				if (b != 0) {
					for (j = 0; j < 8; ++j) {
						if (b & (1 << j)) {
							uint32_t *ptr_to_slide;
							uint32_t old_value;

							ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr)+(sizeof(uint32_t)*(i*8 +j)));
							old_value = *ptr_to_slide;
							*ptr_to_slide += slide;
							if (is_64 && *ptr_to_slide < old_value) {
								/*
								 * We just slid the low 32 bits of a 64-bit pointer
								 * and it looks like there should have been a carry-over
								 * to the upper 32 bits.
								 * The sliding failed...
								 */
								printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n",
								       i, j, b, slide, old_value, *ptr_to_slide);
								return KERN_FAILURE;
							}
						}
					}
				}
			}
		}
	}

	return KERN_SUCCESS;
}
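
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how a set bit in the sliding bitmap maps to a 32-bit word in the page.
 * Byte i of the bitmap covers eight consecutive 4-byte words; bit j of that
 * byte selects word (i * 8 + j), i.e. page offset sizeof(uint32_t)*(i*8+j).
 * For example, bit 3 of bitmap byte 2 addresses the word at offset
 * 4*(2*8+3) = 76 within the page.
 */
#if 0 /* example only, never compiled */
static vm_offset_t
example_slid_word_offset(uint32_t i, uint32_t j)
{
	return (vm_offset_t)(sizeof(uint32_t) * (i * 8 + j));
}
#endif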
/******************************************************************************/
/* Comm page support                                                          */
/******************************************************************************/

ipc_port_t commpage32_handle = IPC_PORT_NULL;
ipc_port_t commpage64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage32_entry = NULL;
vm_named_entry_t commpage64_entry = NULL;
vm_map_t commpage32_map = VM_MAP_NULL;
vm_map_t commpage64_map = VM_MAP_NULL;

ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage_text32_entry = NULL;
vm_named_entry_t commpage_text64_entry = NULL;
vm_map_t commpage_text32_map = VM_MAP_NULL;
vm_map_t commpage_text64_map = VM_MAP_NULL;

user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START;
user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START;
#if defined(__i386__) || defined(__x86_64__)
/*
 * Create a memory entry, VM submap and pmap for one commpage.
 */
static void
_vm_commpage_init(
	ipc_port_t	*handlep,
	vm_map_size_t	size)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_map_t		new_map;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> _init(0x%llx)\n",
		 (long long)size));

	kr = mach_memory_entry_allocate(&mem_entry,
					handlep);
	if (kr != KERN_SUCCESS) {
		panic("_vm_commpage_init: could not allocate mem_entry");
	}
	new_map = vm_map_create(pmap_create(NULL, 0, FALSE), 0, size, TRUE);
	if (new_map == VM_MAP_NULL) {
		panic("_vm_commpage_init: could not allocate VM map");
	}
	mem_entry->backing.map = new_map;
	mem_entry->internal = TRUE;
	mem_entry->is_sub_map = TRUE;
	mem_entry->offset = 0;
	mem_entry->protection = VM_PROT_ALL;
	mem_entry->size = size;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: _init(0x%llx) <- %p\n",
		 (long long)size, *handlep));
}
#endif /* __i386__ || __x86_64__ */
/*
 * Initialize the comm text pages at boot time
 */
extern u_int32_t random(void);
void
vm_commpage_text_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: ->init()\n"));
#if defined(__i386__) || defined(__x86_64__)
	/* create the 32 bit comm text page */
	unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restricting to 32bMAX-2PAGE */
	_vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject;
	commpage_text32_map = commpage_text32_entry->backing.map;
	commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm page */
	offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding upto 2Mb range */
	_vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject;
	commpage_text64_map = commpage_text64_entry->backing.map;
	commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);

	commpage_text_populate();
#else
#error Unknown architecture.
#endif /* __i386__ || __x86_64__ */
	/* populate the routines in here */
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: init() <-\n"));
}
/*
 * Initialize the comm pages at boot time.
 */
void
vm_commpage_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> init()\n"));

#if defined(__i386__) || defined(__x86_64__)
	/* create the 32-bit comm page */
	_vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
	commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
	commpage32_map = commpage32_entry->backing.map;

	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm page */
	_vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
	commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
	commpage64_map = commpage64_entry->backing.map;

#endif /* __i386__ || __x86_64__ */

	/* populate them according to this specific platform */
	commpage_populate();
	__commpage_setup = 1;
#if defined(__i386__) || defined(__x86_64__)
	if (__system_power_source == 0) {
		post_sys_powersource_internal(0, 1);
	}
#endif /* __i386__ || __x86_64__ */

	SHARED_REGION_TRACE_DEBUG(
		("commpage: init() <-\n"));
}
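
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how user-space code consumes the comm page once it has been mapped at its
 * fixed address.  _COMM_PAGE_CPU_CAPABILITIES comes from
 * <machine/cpu_capabilities.h>; reading it is just a load from a fixed
 * address, with no system call involved.
 */
#if 0 /* example only, never compiled */
static uint32_t
example_read_cpu_capabilities(void)
{
	return *(volatile uint32_t *)_COMM_PAGE_CPU_CAPABILITIES;
}
#endif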
/*
 * Enter the appropriate comm page into the task's address space.
 * This is called at exec() time via vm_map_exec().
 */
kern_return_t
vm_commpage_enter(
	vm_map_t	map,
	task_t		task)
{
	ipc_port_t		commpage_handle, commpage_text_handle;
	vm_map_offset_t		commpage_address, objc_address, commpage_text_address;
	vm_map_size_t		commpage_size, objc_size, commpage_text_size;
	int			vm_flags;
	kern_return_t		kr;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> enter(%p,%p)\n",
		 map, task));

	commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
	/* the comm page is likely to be beyond the actual end of the VM map */
	vm_flags = VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX;

	/* select the appropriate comm page for this task */
	assert(! (task_has_64BitAddr(task) ^ vm_map_is_64bit(map)));
	if (task_has_64BitAddr(task)) {
		commpage_handle = commpage64_handle;
		commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
		commpage_size = _COMM_PAGE64_AREA_LENGTH;
		objc_size = _COMM_PAGE64_OBJC_SIZE;
		objc_address = _COMM_PAGE64_OBJC_BASE;
		commpage_text_handle = commpage_text64_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text64_location;
	} else {
		commpage_handle = commpage32_handle;
		commpage_address =
			(vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
		commpage_size = _COMM_PAGE32_AREA_LENGTH;
		objc_size = _COMM_PAGE32_OBJC_SIZE;
		objc_address = _COMM_PAGE32_OBJC_BASE;
		commpage_text_handle = commpage_text32_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text32_location;
	}

	if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
	    (commpage_size & (pmap_nesting_size_min - 1)) == 0) {
		/* the commpage is properly aligned or sized for pmap-nesting */
		vm_flags |= VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP);
	}

	/* map the comm page in the task's address space */
	assert(commpage_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_address,
		commpage_size,
		0,
		vm_flags,
		commpage_handle,
		0,
		FALSE,
		VM_PROT_READ,
		VM_PROT_READ,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage: enter(%p,0x%llx,0x%llx) "
			 "commpage %p mapping failed 0x%x\n",
			 map, (long long)commpage_address,
			 (long long)commpage_size, commpage_handle, kr));
	}

	/* map the comm text page in the task's address space */
	assert(commpage_text_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_text_address,
		commpage_text_size,
		0,
		vm_flags,
		commpage_text_handle,
		0,
		FALSE,
		VM_PROT_READ|VM_PROT_EXECUTE,
		VM_PROT_READ|VM_PROT_EXECUTE,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage text: enter(%p,0x%llx,0x%llx) "
			 "commpage text %p mapping failed 0x%x\n",
			 map, (long long)commpage_text_address,
			 (long long)commpage_text_size, commpage_text_handle, kr));
	}

	/*
	 * Since we're here, we also pre-allocate some virtual space for the
	 * Objective-C run-time, if needed...
	 */
	if (objc_size != 0) {
		kr = vm_map_enter_mem_object(
			map,
			&objc_address,
			objc_size,
			0,
			VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX,
			IPC_PORT_NULL,
			0,
			FALSE,
			VM_PROT_ALL,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("commpage: enter(%p,0x%llx,0x%llx) "
				 "objc mapping failed 0x%x\n",
				 map, (long long)objc_address,
				 (long long)objc_size, kr));
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("commpage: enter(%p,%p) <- 0x%x\n",
		 map, task, kr));
	return kr;
}
/*
 * This is called from powermanagement code to let the kernel know the current
 * source of power:
 * 0 if it is external source (connected to power)
 * 1 if it is internal power source, i.e. battery
 */
void
#if defined(__i386__) || defined(__x86_64__)
post_sys_powersource(int i)
#else
post_sys_powersource(__unused int i)
#endif
{
#if defined(__i386__) || defined(__x86_64__)
	post_sys_powersource_internal(i, 0);
#endif /* __i386__ || __x86_64__ */
}


#if defined(__i386__) || defined(__x86_64__)
static void
post_sys_powersource_internal(int i, int internal)
{
	if (internal == 0)
		__system_power_source = i;

	if (__commpage_setup != 0) {
		if (__system_power_source != 0)
			commpage_set_spin_count(0);
		else
			commpage_set_spin_count(MP_SPIN_TRIES);
	}
}
#endif /* __i386__ || __x86_64__ */