/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Shared region (... and comm page)
 *
 * This file handles the VM shared region and comm page.
 *
 *
 * A shared region is a submap that contains the most common system shared
 * libraries for a given environment.
 * An environment is defined by (cpu-type, 64-bitness, root directory).
 *
 * The point of a shared region is to reduce the setup overhead when exec'ing
 * a new process.
 * A shared region uses a shared VM submap that gets mapped automatically
 * at exec() time (see vm_map_exec()).  The first process of a given
 * environment sets up the shared region and all further processes in that
 * environment can re-use that shared region without having to re-create
 * the same mappings in their VM map.  All they need is contained in the shared
 * region.
 * It can also share a pmap (mostly for read-only parts but also for the
 * initial version of some writable parts), which gets "nested" into the
 * process's pmap.  This reduces the number of soft faults:  once one process
 * brings in a page in the shared region, all the other processes can access
 * it without having to enter it in their own pmap.
 *
 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
 * to map the appropriate shared region in the process's address space.
 * We look up the appropriate shared region for the process's environment.
 * If we can't find one, we create a new (empty) one and add it to the list.
 * Otherwise, we just take an extra reference on the shared region we found.
 *
 * The "dyld" runtime (mapped into the process's address space at exec() time)
 * will then use the shared_region_check_np() and shared_region_map_np()
 * system calls to validate and/or populate the shared region with the
 * appropriate dyld_shared_cache file.
 *
 * The shared region is inherited on fork() and the child simply takes an
 * extra reference on its parent's shared region.
 *
 * When the task terminates, we release a reference on its shared region.
 * When the last reference is released, we destroy the shared region.
 *
 * After a chroot(), the calling process keeps using its original shared region,
 * since that's what was mapped when it was started.  But its children
 * will use a different shared region, because they need to use the shared
 * cache that's relative to the new root directory.
 *
 *
 * A "comm page" is an area of memory that is populated by the kernel with
 * the appropriate platform-specific version of some commonly used code.
 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
 * for the native cpu-type.  No need to overly optimize translated code
 * for hardware that is not really there!
 *
 * The comm pages are created and populated at boot time.
 *
 * The appropriate comm page is mapped into a process's address space
 * at exec() time, in vm_map_exec().
 * It is then inherited on fork().
 *
 * The comm page is shared between the kernel and all applications of
 * a given platform.  Only the kernel can modify it.
 *
 * Applications just branch to fixed addresses in the comm page and find
 * the right version of the code for the platform.  There is also some
 * data provided and updated by the kernel for processes to retrieve easily
 * without having to do a system call.
 */
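/*
 * A minimal sketch of the exec-time flow described above.  The call shapes
 * are simplified for illustration (the real prototypes in this file carry
 * more arguments) and the dyld-side calls are shown only by name:
 *
 *	// in vm_map_exec(), for the new task's environment:
 *	//	kr = vm_shared_region_enter(map, task, fsroot, cpu, is_64bit);
 *	//	(which internally does vm_shared_region_lookup() + vm_shared_region_set())
 *
 *	// later, from user space, dyld validates/populates the region:
 *	//	shared_region_check_np(&start_address);
 *	//	shared_region_map_np(fd, mapping_count, mappings);
 */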
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>

#include <vm/vm_protos.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>
/* "dyld" uses this to figure out what the kernel supports */
int shared_region_version = 3;

/* trace level, output is sent to the system log file */
int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;

/* should local (non-chroot) shared regions persist when no task uses them ? */
int shared_region_persistence = 0;	/* no by default */

/* delay before reclaiming an unused shared region */
int shared_region_destroy_delay = 120;	/* in seconds */

#ifndef CONFIG_EMBEDDED
/*
 * Only one cache gets to slide on Desktop, since we can't
 * tear down slide info properly today and the desktop actually
 * produces lots of shared caches.
 */
boolean_t shared_region_completed_slide = FALSE;
#endif /* !CONFIG_EMBEDDED */
/* this lock protects all the shared region data structures */
lck_grp_t *vm_shared_region_lck_grp;
lck_mtx_t vm_shared_region_lock;

#define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
#define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
#define vm_shared_region_sleep(event, interruptible)			\
	lck_mtx_sleep(&vm_shared_region_lock,				\
		      LCK_SLEEP_DEFAULT,				\
		      (event_t) (event),				\
		      (interruptible))

/* the list of currently available shared regions (one per environment) */
queue_head_t	vm_shared_region_queue;
static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
static vm_shared_region_t vm_shared_region_create(
	void			*root_dir,
	cpu_type_t		cputype,
	boolean_t		is_64bit);
static void vm_shared_region_destroy(vm_shared_region_t shared_region);

static void vm_shared_region_timeout(thread_call_param_t param0,
				     thread_call_param_t param1);

static int __commpage_setup = 0;
#if defined(__i386__) || defined(__x86_64__)
static int __system_power_source = 1;	/* init to external power source */
static void post_sys_powersource_internal(int i, int internal);
#endif /* __i386__ || __x86_64__ */
/*
 * Initialize the module...
 */
void
vm_shared_region_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> init\n"));

	vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region",
						      LCK_GRP_ATTR_NULL);
	lck_mtx_init(&vm_shared_region_lock,
		     vm_shared_region_lck_grp,
		     LCK_ATTR_NULL);

	queue_init(&vm_shared_region_queue);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: <- init\n"));
}
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 */
vm_shared_region_t
vm_shared_region_get(
	task_t		task)
{
	vm_shared_region_t	shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> get(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(task)));

	vm_shared_region_lock();
	shared_region = task->shared_region;
	if (shared_region != NULL) {
		assert(shared_region->sr_ref_count > 0);
		vm_shared_region_reference_locked(shared_region);
	}
	vm_shared_region_unlock();

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: get(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	return shared_region;
}
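/*
 * Typical pairing for the extra reference returned above, a minimal sketch
 * assuming the caller only needs the region temporarily:
 *
 *	vm_shared_region_t sr = vm_shared_region_get(task);
 *	if (sr != NULL) {
 *		... use vm_shared_region_base_address(sr), etc. ...
 *		vm_shared_region_deallocate(sr);	// consume the extra reference
 *	}
 */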
/*
 * Get the base address of the shared region.
 * That's the address at which it needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_offset_t
vm_shared_region_base_address(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> base_address(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: base_address(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_base_address));
	return shared_region->sr_base_address;
}
/*
 * Get the size of the shared region.
 * That's the size that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_size_t
vm_shared_region_size(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> size(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: size(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_size));
	return shared_region->sr_size;
}
/*
 * Get the memory entry of the shared region.
 * That's the "memory object" that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
ipc_port_t
vm_shared_region_mem_entry(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> mem_entry(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: mem_entry(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry)));
	return shared_region->sr_mem_entry;
}
uint32_t
vm_shared_region_get_slide(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> vm_shared_region_get_slide(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: vm_shared_region_get_slide(%p) <- %u\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_slide_info.slide));

	/* 0 if we haven't slid */
	assert(shared_region->sr_slide_info.slide_object != NULL ||
	       shared_region->sr_slide_info.slide == 0);

	return shared_region->sr_slide_info.slide;
}
vm_shared_region_slide_info_t
vm_shared_region_get_slide_info(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> vm_shared_region_get_slide_info(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info)));
	return &shared_region->sr_slide_info;
}
/*
 * Set the shared region the process should use.
 * A NULL new shared region means that we just want to release the old
 * shared region.
 * The caller should already have an extra reference on the new shared region
 * (if any).  We release a reference on the old shared region (if any).
 */
void
vm_shared_region_set(
	task_t			task,
	vm_shared_region_t	new_shared_region)
{
	vm_shared_region_t	old_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> set(%p, %p)\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(new_shared_region)));

	vm_shared_region_lock();

	old_shared_region = task->shared_region;
	if (new_shared_region) {
		assert(new_shared_region->sr_ref_count > 0);
	}

	task->shared_region = new_shared_region;

	vm_shared_region_unlock();

	if (old_shared_region) {
		assert(old_shared_region->sr_ref_count > 0);
		vm_shared_region_deallocate(old_shared_region);
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: set(%p) <- old=%p new=%p\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(old_shared_region),
		 (void *)VM_KERNEL_ADDRPERM(new_shared_region)));
}
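/*
 * How the lookup and set steps combine at exec() time, shown as a minimal
 * sketch (error handling omitted; see vm_shared_region_enter() below for
 * the real sequence):
 *
 *	shared_region = vm_shared_region_lookup(fsroot, cpu, is_64bit);
 *	// lookup returned an extra reference...
 *	vm_shared_region_set(task, shared_region);
 *	// ...which the task now owns; do not deallocate it here.
 */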
/*
 * Look up the shared region for the desired environment.
 * If none is found, create a new (empty) one.
 * Grab an extra reference on the returned shared region, to make sure
 * it doesn't get destroyed before the caller is done with it.  The caller
 * is responsible for consuming that extra reference if necessary.
 */
vm_shared_region_t
vm_shared_region_lookup(
	void		*root_dir,
	cpu_type_t	cputype,
	boolean_t	is_64bit)
{
	vm_shared_region_t	shared_region;
	vm_shared_region_t	new_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> lookup(root=%p,cpu=%d,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir), cputype, is_64bit));

	shared_region = NULL;
	new_shared_region = NULL;

	vm_shared_region_lock();
	for (;;) {
		queue_iterate(&vm_shared_region_queue,
			      shared_region,
			      vm_shared_region_t,
			      sr_q) {
			assert(shared_region->sr_ref_count > 0);
			if (shared_region->sr_cpu_type == cputype &&
			    shared_region->sr_root_dir == root_dir &&
			    shared_region->sr_64bit == is_64bit) {
				/* found a match ! */
				vm_shared_region_reference_locked(shared_region);
				goto done;
			}
		}
		if (new_shared_region == NULL) {
			/* no match: create a new one */
			vm_shared_region_unlock();
			new_shared_region = vm_shared_region_create(root_dir,
								    cputype,
								    is_64bit);
			/* do the lookup again, in case we lost a race */
			vm_shared_region_lock();
		} else {
			/* still no match: use our new one */
			shared_region = new_shared_region;
			new_shared_region = NULL;
			queue_enter(&vm_shared_region_queue,
				    shared_region,
				    vm_shared_region_t,
				    sr_q);
			break;
		}
	}

done:
	vm_shared_region_unlock();

	if (new_shared_region) {
		/*
		 * We lost a race with someone else to create a new shared
		 * region for that environment.  Get rid of our unused one.
		 */
		assert(new_shared_region->sr_ref_count == 1);
		new_shared_region->sr_ref_count--;
		vm_shared_region_destroy(new_shared_region);
		new_shared_region = NULL;
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: lookup(root=%p,cpu=%d,64bit=%d) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir),
		 cputype, is_64bit,
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	assert(shared_region->sr_ref_count > 0);
	return shared_region;
}
/*
 * Take an extra reference on a shared region.
 * The vm_shared_region_lock should already be held by the caller.
 */
static void
vm_shared_region_reference_locked(
	vm_shared_region_t	shared_region)
{
	LCK_MTX_ASSERT(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> reference_locked(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count++;

	if (shared_region->sr_timer_call != NULL) {
		boolean_t cancelled;

		/* cancel and free any pending timeout */
		cancelled = thread_call_cancel(shared_region->sr_timer_call);
		if (cancelled) {
			thread_call_free(shared_region->sr_timer_call);
			shared_region->sr_timer_call = NULL;
			/* release the reference held by the cancelled timer */
			shared_region->sr_ref_count--;
		} else {
			/* the timer will drop the reference and free itself */
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: reference_locked(%p) <- %d\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_ref_count));
}
/*
 * Release a reference on the shared region.
 * Destroy it if there are no references left.
 */
void
vm_shared_region_deallocate(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> deallocate(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	vm_shared_region_lock();

	assert(shared_region->sr_ref_count > 0);

	if (shared_region->sr_root_dir == NULL) {
		/*
		 * Local (i.e. based on the boot volume) shared regions
		 * can persist or not based on the "shared_region_persistence"
		 * sysctl.
		 * Make sure that this one complies.
		 *
		 * See comments in vm_shared_region_slide() for notes about
		 * shared regions we have slid (which are not torn down currently).
		 */
		if (shared_region_persistence &&
		    !shared_region->sr_persists) {
			/* make this one persistent */
			shared_region->sr_ref_count++;
			shared_region->sr_persists = TRUE;
		} else if (!shared_region_persistence &&
			   shared_region->sr_persists) {
			/* make this one no longer persistent */
			assert(shared_region->sr_ref_count > 1);
			shared_region->sr_ref_count--;
			shared_region->sr_persists = FALSE;
		}
	}

	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count--;
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p): ref now %d\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_ref_count));

	if (shared_region->sr_ref_count == 0) {
		uint64_t deadline;

		assert(!shared_region->sr_slid);

		if (shared_region->sr_timer_call == NULL) {
			/* hold one reference for the timer */
			assert(!shared_region->sr_mapping_in_progress);
			shared_region->sr_ref_count++;

			/* set up the timer */
			shared_region->sr_timer_call = thread_call_allocate(
				(thread_call_func_t) vm_shared_region_timeout,
				(thread_call_param_t) shared_region);

			/* schedule the timer */
			clock_interval_to_deadline(shared_region_destroy_delay,
						   NSEC_PER_SEC,
						   &deadline);
			thread_call_enter_delayed(shared_region->sr_timer_call,
						  deadline);

			SHARED_REGION_TRACE_DEBUG(
				("shared_region: deallocate(%p): armed timer\n",
				 (void *)VM_KERNEL_ADDRPERM(shared_region)));

			vm_shared_region_unlock();
		} else {
			/* timer expired: let go of this shared region */

			/*
			 * We can't properly handle teardown of a slid object today.
			 */
			assert(!shared_region->sr_slid);

			/*
			 * Remove it from the queue first, so no one can find
			 * it...
			 */
			queue_remove(&vm_shared_region_queue,
				     shared_region,
				     vm_shared_region_t,
				     sr_q);
			vm_shared_region_unlock();

			/* ... and destroy it */
			vm_shared_region_destroy(shared_region);
			shared_region = NULL;
		}
	} else {
		vm_shared_region_unlock();
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p) <-\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
}
static void
vm_shared_region_timeout(
	thread_call_param_t	param0,
	__unused thread_call_param_t	param1)
{
	vm_shared_region_t	shared_region;

	shared_region = (vm_shared_region_t) param0;

	vm_shared_region_deallocate(shared_region);
}
/*
 * Create a new (empty) shared region for a new environment.
 */
static vm_shared_region_t
vm_shared_region_create(
	void			*root_dir,
	cpu_type_t		cputype,
	boolean_t		is_64bit)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	ipc_port_t		mem_entry_port;
	vm_shared_region_t	shared_region;
	vm_shared_region_slide_info_t si;
	vm_map_t		sub_map;
	mach_vm_offset_t	base_address, pmap_nesting_start;
	mach_vm_size_t		size, pmap_nesting_size;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> create(root=%p,cpu=%d,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir), cputype, is_64bit));

	base_address = 0;
	size = 0;
	mem_entry_port = IPC_PORT_NULL;
	sub_map = VM_MAP_NULL;

	/* create a new shared region structure... */
	shared_region = kalloc(sizeof (*shared_region));
	if (shared_region == NULL) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: couldn't allocate\n"));
		goto done;
	}

	/* figure out the correct settings for the desired environment */
	if (is_64bit) {
		switch (cputype) {
#if defined(__arm64__)
		case CPU_TYPE_ARM64:
			base_address = SHARED_REGION_BASE_ARM64;
			size = SHARED_REGION_SIZE_ARM64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM64;
			break;
#elif !defined(__arm__)
		case CPU_TYPE_I386:
			base_address = SHARED_REGION_BASE_X86_64;
			size = SHARED_REGION_SIZE_X86_64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC64;
			size = SHARED_REGION_SIZE_PPC64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
			break;
#endif
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				 cputype));
			kfree(shared_region, sizeof (*shared_region));
			shared_region = NULL;
			goto done;
		}
	} else {
		switch (cputype) {
#if defined(__arm__) || defined(__arm64__)
		case CPU_TYPE_ARM:
		case CPU_TYPE_ARM64:
			base_address = SHARED_REGION_BASE_ARM;
			size = SHARED_REGION_SIZE_ARM;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
			break;
#else
		case CPU_TYPE_I386:
			base_address = SHARED_REGION_BASE_I386;
			size = SHARED_REGION_SIZE_I386;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC;
			size = SHARED_REGION_SIZE_PPC;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
			break;
#endif
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				 cputype));
			kfree(shared_region, sizeof (*shared_region));
			shared_region = NULL;
			goto done;
		}
	}

	/* create a memory entry structure and a Mach port handle */
	kr = mach_memory_entry_allocate(&mem_entry,
					&mem_entry_port);
	if (kr != KERN_SUCCESS) {
		kfree(shared_region, sizeof (*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: "
			 "couldn't allocate mem_entry\n"));
		goto done;
	}

#if defined(__arm__) || defined(__arm64__)
	{
		struct pmap *pmap_nested;

		pmap_nested = pmap_create(NULL, 0, is_64bit);
		if (pmap_nested != PMAP_NULL) {
			pmap_set_nested(pmap_nested);
			sub_map = vm_map_create(pmap_nested, 0, size, TRUE);
#if defined(__arm64__)
			if (is_64bit ||
			    page_shift_user32 == SIXTEENK_PAGE_SHIFT) {
				/* enforce 16KB alignment of VM map entries */
				vm_map_set_page_shift(sub_map,
						      SIXTEENK_PAGE_SHIFT);
			}
#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
			/* enforce 16KB alignment for watch targets with new ABI */
			vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT);
#endif /* __arm64__ */
		} else {
			sub_map = VM_MAP_NULL;
		}
	}
#else
	/* create a VM sub map and its pmap */
	sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit),
				0, size,
				TRUE);
#endif
	if (sub_map == VM_MAP_NULL) {
		ipc_port_release_send(mem_entry_port);
		kfree(shared_region, sizeof (*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: "
			 "couldn't allocate map\n"));
		goto done;
	}

	assert(!sub_map->disable_vmentry_reuse);
	sub_map->is_nested_map = TRUE;

	/* make the memory entry point to the VM sub map */
	mem_entry->is_sub_map = TRUE;
	mem_entry->backing.map = sub_map;
	mem_entry->size = size;
	mem_entry->protection = VM_PROT_ALL;

	/* make the shared region point at the memory entry */
	shared_region->sr_mem_entry = mem_entry_port;

	/* fill in the shared region's environment and settings */
	shared_region->sr_base_address = base_address;
	shared_region->sr_size = size;
	shared_region->sr_pmap_nesting_start = pmap_nesting_start;
	shared_region->sr_pmap_nesting_size = pmap_nesting_size;
	shared_region->sr_cpu_type = cputype;
	shared_region->sr_64bit = is_64bit;
	shared_region->sr_root_dir = root_dir;

	queue_init(&shared_region->sr_q);
	shared_region->sr_mapping_in_progress = FALSE;
	shared_region->sr_slide_in_progress = FALSE;
	shared_region->sr_persists = FALSE;
	shared_region->sr_slid = FALSE;
	shared_region->sr_timer_call = NULL;
	shared_region->sr_first_mapping = (mach_vm_offset_t) -1;

	/* grab a reference for the caller */
	shared_region->sr_ref_count = 1;

	/* And set up slide info */
	si = &shared_region->sr_slide_info;
	si->start = 0;
	si->end = 0;
	si->slide = 0;
	si->slide_object = NULL;
	si->slide_info_size = 0;
	si->slide_info_entry = NULL;

	/* Initialize UUID */
	memset(&shared_region->sr_uuid, '\0', sizeof(shared_region->sr_uuid));
	shared_region->sr_uuid_copied = FALSE;

done:
	if (shared_region) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=%d,64bit=%d,"
			 "base=0x%llx,size=0x%llx) <- "
			 "%p mem=(%p,%p) map=%p pmap=%p\n",
			 (void *)VM_KERNEL_ADDRPERM(root_dir),
			 cputype, is_64bit, (long long)base_address,
			 (long long)size,
			 (void *)VM_KERNEL_ADDRPERM(shared_region),
			 (void *)VM_KERNEL_ADDRPERM(mem_entry_port),
			 (void *)VM_KERNEL_ADDRPERM(mem_entry),
			 (void *)VM_KERNEL_ADDRPERM(sub_map),
			 (void *)VM_KERNEL_ADDRPERM(sub_map->pmap)));
	} else {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=%d,64bit=%d,"
			 "base=0x%llx,size=0x%llx) <- NULL",
			 (void *)VM_KERNEL_ADDRPERM(root_dir),
			 cputype, is_64bit, (long long)base_address,
			 (long long)size));
	}
	return shared_region;
}
/*
 * Destroy a now-unused shared region.
 * The shared region is no longer in the queue and can not be looked up.
 */
static void
vm_shared_region_destroy(
	vm_shared_region_t	shared_region)
{
	vm_named_entry_t	mem_entry;
	vm_map_t		map;

	SHARED_REGION_TRACE_INFO(
		("shared_region: -> destroy(%p) (root=%p,cpu=%d,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir),
		 shared_region->sr_cpu_type,
		 shared_region->sr_64bit));

	assert(shared_region->sr_ref_count == 0);
	assert(!shared_region->sr_persists);
	assert(!shared_region->sr_slid);

	mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
	assert(mem_entry->is_sub_map);
	assert(!mem_entry->internal);
	assert(!mem_entry->is_copy);
	map = mem_entry->backing.map;

	/*
	 * Clean up the pmap first.  The virtual addresses that were
	 * entered in this possibly "nested" pmap may have different values
	 * than the VM map's min and max offsets, if the VM sub map was
	 * mapped at a non-zero offset in the processes' main VM maps, which
	 * is usually the case, so the clean-up we do in vm_map_destroy() would
	 * not be enough here.
	 */
	pmap_remove(map->pmap,
		    shared_region->sr_base_address,
		    (shared_region->sr_base_address +
		     shared_region->sr_size));

	/*
	 * Release our (one and only) handle on the memory entry.
	 * This will generate a no-senders notification, which will be processed
	 * by ipc_kobject_notify(), which will release the one and only
	 * reference on the memory entry and cause it to be destroyed, along
	 * with the VM sub map and its pmap.
	 */
	mach_memory_entry_port_release(shared_region->sr_mem_entry);
	mem_entry = NULL;
	shared_region->sr_mem_entry = IPC_PORT_NULL;

	if (shared_region->sr_timer_call) {
		thread_call_free(shared_region->sr_timer_call);
	}

#if 0
	/*
	 * If slid, free those resources.  We'll want this eventually,
	 * but can't handle it properly today.
	 */
	si = &shared_region->sr_slide_info;
	if (si->slide_info_entry) {
		kmem_free(kernel_map,
			  (vm_offset_t) si->slide_info_entry,
			  (vm_size_t) si->slide_info_size);
		vm_object_deallocate(si->slide_object);
	}
#endif

	/* release the shared region structure... */
	kfree(shared_region, sizeof (*shared_region));

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: destroy(%p) <-\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	shared_region = NULL;
}
/*
 * Gets the address of the first (in time) mapping in the shared region.
 */
kern_return_t
vm_shared_region_start_address(
	vm_shared_region_t	shared_region,
	mach_vm_offset_t	*start_address)
{
	kern_return_t		kr;
	mach_vm_offset_t	sr_base_address;
	mach_vm_offset_t	sr_first_mapping;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> start_address(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);

	vm_shared_region_lock();

	/*
	 * Wait if there's another thread establishing a mapping
	 * in this shared region right when we're looking at it.
	 * We want a consistent view of the map...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		assert(shared_region->sr_ref_count > 1);
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
				       THREAD_UNINT);
	}
	assert(!shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);

	sr_base_address = shared_region->sr_base_address;
	sr_first_mapping = shared_region->sr_first_mapping;

	if (sr_first_mapping == (mach_vm_offset_t) -1) {
		/* shared region is empty */
		kr = KERN_INVALID_ADDRESS;
	} else {
		kr = KERN_SUCCESS;
		*start_address = sr_base_address + sr_first_mapping;
	}

	vm_shared_region_unlock();

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: start_address(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_base_address));

	return kr;
}
void
vm_shared_region_undo_mappings(
	vm_map_t		sr_map,
	mach_vm_offset_t	sr_base_address,
	struct shared_file_mapping_np	*mappings,
	unsigned int		mappings_count)
{
	unsigned int		j = 0;
	vm_shared_region_t	shared_region = NULL;
	boolean_t		reset_shared_region_state = FALSE;

	shared_region = vm_shared_region_get(current_task());
	if (shared_region == NULL) {
		printf("Failed to undo mappings because of NULL shared region.\n");
		return;
	}

	if (sr_map == NULL) {
		ipc_port_t		sr_handle;
		vm_named_entry_t	sr_mem_entry;

		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);

		while (shared_region->sr_mapping_in_progress) {
			/* wait for our turn... */
			vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
					       THREAD_UNINT);
		}
		assert(!shared_region->sr_mapping_in_progress);
		assert(shared_region->sr_ref_count > 1);
		/* let others know we're working in this shared region */
		shared_region->sr_mapping_in_progress = TRUE;

		vm_shared_region_unlock();

		reset_shared_region_state = TRUE;

		/* no need to lock because this data is never modified... */
		sr_handle = shared_region->sr_mem_entry;
		sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
		sr_map = sr_mem_entry->backing.map;
		sr_base_address = shared_region->sr_base_address;
	}
	/*
	 * Undo the mappings we've established so far.
	 */
	for (j = 0; j < mappings_count; j++) {
		kern_return_t kr2;

		if (mappings[j].sfm_size == 0) {
			/*
			 * We didn't establish this
			 * mapping, so nothing to undo.
			 */
			continue;
		}
		SHARED_REGION_TRACE_INFO(
			("shared_region: mapping[%d]: "
			 "address:0x%016llx "
			 "size:0x%016llx "
			 "offset:0x%016llx "
			 "maxprot:0x%x prot:0x%x: "
			 "undoing...\n",
			 j,
			 (long long)mappings[j].sfm_address,
			 (long long)mappings[j].sfm_size,
			 (long long)mappings[j].sfm_file_offset,
			 mappings[j].sfm_max_prot,
			 mappings[j].sfm_init_prot));
		kr2 = mach_vm_deallocate(
			sr_map,
			(mappings[j].sfm_address -
			 sr_base_address),
			mappings[j].sfm_size);
		assert(kr2 == KERN_SUCCESS);
	}

	if (reset_shared_region_state) {
		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);
		assert(shared_region->sr_mapping_in_progress);
		/* we're done working on that shared region */
		shared_region->sr_mapping_in_progress = FALSE;
		thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
		vm_shared_region_unlock();
		reset_shared_region_state = FALSE;
	}

	vm_shared_region_deallocate(shared_region);
}
/*
 * Establish some mappings of a file in the shared region.
 * This is used by "dyld" via the shared_region_map_np() system call
 * to populate the shared region with the appropriate shared cache.
 *
 * One could also call it several times to incrementally load several
 * libraries, as long as they do not overlap.
 * It will return KERN_SUCCESS if the mappings were successfully established
 * or if they were already established identically by another process.
 */
kern_return_t
vm_shared_region_map_file(
	vm_shared_region_t		shared_region,
	unsigned int			mappings_count,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	void				*root_dir,
	uint32_t			slide,
	user_addr_t			slide_start,
	user_addr_t			slide_size)
{
	kern_return_t		kr;
	vm_object_t		file_object;
	ipc_port_t		sr_handle;
	vm_named_entry_t	sr_mem_entry;
	vm_map_t		sr_map;
	mach_vm_offset_t	sr_base_address;
	unsigned int		i;
	mach_port_t		map_port;
	vm_map_offset_t		target_address;
	vm_object_t		object;
	vm_object_size_t	obj_size;
	struct shared_file_mapping_np	*mapping_to_slide = NULL;
	mach_vm_offset_t	first_mapping = (mach_vm_offset_t) -1;
	vm_map_offset_t		lowest_unnestable_addr = 0;
	vm_map_kernel_flags_t	vmk_flags;

	kr = KERN_SUCCESS;

#if __arm64__
	if ((shared_region->sr_64bit ||
	     page_shift_user32 == SIXTEENK_PAGE_SHIFT) &&
	    ((slide & SIXTEENK_PAGE_MASK) != 0)) {
		printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n",
		       __FUNCTION__, slide);
		kr = KERN_INVALID_ARGUMENT;
		goto done;
	}
#endif /* __arm64__ */

	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);

	if (shared_region->sr_root_dir != root_dir) {
		/*
		 * This shared region doesn't match the current root
		 * directory of this process.  Deny the mapping to
		 * avoid tainting the shared region with something that
		 * doesn't quite belong into it.
		 */
		vm_shared_region_unlock();
		kr = KERN_PROTECTION_FAILURE;
		goto done;
	}

	/*
	 * Make sure we handle only one mapping at a time in a given
	 * shared region, to avoid race conditions.  This should not
	 * happen frequently...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
				       THREAD_UNINT);
	}
	assert(!shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);
	/* let others know we're working in this shared region */
	shared_region->sr_mapping_in_progress = TRUE;

	vm_shared_region_unlock();

	/* no need to lock because this data is never modified... */
	sr_handle = shared_region->sr_mem_entry;
	sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
	sr_map = sr_mem_entry->backing.map;
	sr_base_address = shared_region->sr_base_address;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
		 (void *)VM_KERNEL_ADDRPERM(mappings),
		 (void *)VM_KERNEL_ADDRPERM(file_control), file_size));

	/* get the VM object associated with the file to be mapped */
	file_object = memory_object_control_to_vm_object(file_control);

	assert(file_object);

	/* establish the mappings */
	for (i = 0; i < mappings_count; i++) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: mapping[%d]: "
			 "address:0x%016llx size:0x%016llx offset:0x%016llx "
			 "maxprot:0x%x prot:0x%x\n",
			 i,
			 (long long)mappings[i].sfm_address,
			 (long long)mappings[i].sfm_size,
			 (long long)mappings[i].sfm_file_offset,
			 mappings[i].sfm_max_prot,
			 mappings[i].sfm_init_prot));

		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			/* zero-filled memory */
			map_port = MACH_PORT_NULL;
		} else {
			/* file-backed memory */
			__IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager);
		}

		if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) {
			/*
			 * This is the mapping that needs to be slid.
			 */
			if (mapping_to_slide != NULL) {
				SHARED_REGION_TRACE_INFO(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x "
					 "will not be slid as only one such mapping is allowed...\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot));
			} else {
				mapping_to_slide = &mappings[i];
			}
		}

		/* mapping's address is relative to the shared region base */
		target_address =
			mappings[i].sfm_address - sr_base_address;

		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		vmk_flags.vmkf_already = TRUE;

		/* establish that mapping, OK if it's "already" there */
		if (map_port == MACH_PORT_NULL) {
			/*
			 * We want to map some anonymous memory in a
			 * shared region.
			 * We have to create the VM object now, so that it
			 * can be mapped "copy-on-write".
			 */
			obj_size = vm_map_round_page(mappings[i].sfm_size,
						     VM_MAP_PAGE_MASK(sr_map));
			object = vm_object_allocate(obj_size);
			if (object == VM_OBJECT_NULL) {
				kr = KERN_RESOURCE_SHORTAGE;
			} else {
				kr = vm_map_enter(
					sr_map,
					&target_address,
					vm_map_round_page(mappings[i].sfm_size,
							  VM_MAP_PAGE_MASK(sr_map)),
					0,
					VM_FLAGS_FIXED,
					vmk_flags,
					VM_KERN_MEMORY_NONE,
					object,
					0,
					TRUE,
					mappings[i].sfm_init_prot & VM_PROT_ALL,
					mappings[i].sfm_max_prot & VM_PROT_ALL,
					VM_INHERIT_DEFAULT);
			}
		} else {
			object = VM_OBJECT_NULL; /* no anonymous memory here */
			kr = vm_map_enter_mem_object(
				sr_map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size,
						  VM_MAP_PAGE_MASK(sr_map)),
				0,
				VM_FLAGS_FIXED,
				vmk_flags,
				VM_KERN_MEMORY_NONE,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				mappings[i].sfm_init_prot & VM_PROT_ALL,
				mappings[i].sfm_max_prot & VM_PROT_ALL,
				VM_INHERIT_DEFAULT);
		}

		if (kr == KERN_SUCCESS) {
			/*
			 * Record the first (chronologically) successful
			 * mapping in this shared region.
			 * We're protected by "sr_mapping_in_progress" here,
			 * so no need to lock "shared_region".
			 */
			if (first_mapping == (mach_vm_offset_t) -1) {
				first_mapping = target_address;
			}

			/*
			 * Record the lowest writable address in this
			 * sub map, to log any unexpected unnesting below
			 * that address (see log_unnest_badness()).
			 */
			if ((mappings[i].sfm_init_prot & VM_PROT_WRITE) &&
			    sr_map->is_nested_map &&
			    (lowest_unnestable_addr == 0 ||
			     (target_address < lowest_unnestable_addr))) {
				lowest_unnestable_addr = target_address;
			}
		} else {
			if (map_port == MACH_PORT_NULL) {
				/*
				 * Get rid of the VM object we just created
				 * but failed to map.
				 */
				vm_object_deallocate(object);
				object = VM_OBJECT_NULL;
			}
			if (kr == KERN_MEMORY_PRESENT) {
				/*
				 * This exact mapping was already there:
				 * that's fine.
				 */
				SHARED_REGION_TRACE_INFO(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x "
					 "already mapped...\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot));
				/*
				 * We didn't establish this mapping ourselves;
				 * let's reset its size, so that we do not
				 * attempt to undo it if an error occurs later.
				 */
				mappings[i].sfm_size = 0;
				kr = KERN_SUCCESS;
			} else {
				/* this mapping failed ! */
				SHARED_REGION_TRACE_ERROR(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x failed 0x%x\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot,
					 kr));

				vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i);
				break;
			}
		}
	}

	if (kr == KERN_SUCCESS &&
	    slide_size != 0 &&
	    mapping_to_slide != NULL) {
		kr = vm_shared_region_slide(slide,
					    mapping_to_slide->sfm_file_offset,
					    mapping_to_slide->sfm_size,
					    slide_start,
					    slide_size,
					    file_control);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: region_slide("
				 "slide:0x%x start:0x%016llx "
				 "size:0x%016llx) failed 0x%x\n",
				 slide,
				 (long long)slide_start,
				 (long long)slide_size,
				 kr));
			vm_shared_region_undo_mappings(sr_map,
						       sr_base_address,
						       mappings,
						       mappings_count);
		}
	}

	if (kr == KERN_SUCCESS) {
		/* adjust the map's "lowest_unnestable_start" */
		lowest_unnestable_addr &= ~(pmap_nesting_size_min-1);
		if (lowest_unnestable_addr !=
		    sr_map->lowest_unnestable_start) {
			vm_map_lock(sr_map);
			sr_map->lowest_unnestable_start =
				lowest_unnestable_addr;
			vm_map_unlock(sr_map);
		}
	}

	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);
	assert(shared_region->sr_mapping_in_progress);
	/* set "sr_first_mapping"; dyld uses it to validate the shared cache */
	if (kr == KERN_SUCCESS &&
	    shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
		shared_region->sr_first_mapping = first_mapping;
	}

	/* copy in the shared region UUID to the shared region structure */
	if (kr == KERN_SUCCESS && !shared_region->sr_uuid_copied) {
		int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping +
				    offsetof(struct _dyld_cache_header, uuid)),
				   (char *)&shared_region->sr_uuid,
				   sizeof(shared_region->sr_uuid));
		if (error == 0) {
			shared_region->sr_uuid_copied = TRUE;
		} else {
#if DEVELOPMENT || DEBUG
			panic("shared_region: copyin_UUID(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
			      "offset:0x%016llx size:0x%016llx) failed with %d\n",
			      (long long)shared_region->sr_base_address,
			      (long long)shared_region->sr_first_mapping,
			      (long long)offsetof(struct _dyld_cache_header, uuid),
			      (long long)sizeof(shared_region->sr_uuid),
			      error);
#endif /* DEVELOPMENT || DEBUG */
			shared_region->sr_uuid_copied = FALSE;
		}
	}

	/* we're done working on that shared region */
	shared_region->sr_mapping_in_progress = FALSE;
	thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
	vm_shared_region_unlock();

done:
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
		 (void *)VM_KERNEL_ADDRPERM(mappings),
		 (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr));
	return kr;
}
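/*
 * For reference, a hypothetical mappings[] entry as dyld might pass it to
 * shared_region_map_np().  The field values below are made up for
 * illustration only; the real values come from the dyld_shared_cache file's
 * mapping table:
 *
 *	struct shared_file_mapping_np m = {
 *		.sfm_address     = sr_base_address + 0x0,	// where to map it
 *		.sfm_size        = 0x200000,			// bytes to map
 *		.sfm_file_offset = 0x0,				// offset in the cache file
 *		.sfm_max_prot    = VM_PROT_READ | VM_PROT_EXECUTE,
 *		.sfm_init_prot   = VM_PROT_READ | VM_PROT_EXECUTE,
 *	};
 */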
/*
 * Enter the appropriate shared region into "map" for "task".
 * This involves looking up the shared region (and possibly creating a new
 * one) for the desired environment, then mapping the VM sub map into the
 * task's VM "map", with the appropriate level of pmap-nesting.
 */
kern_return_t
vm_shared_region_enter(
	struct _vm_map		*map,
	struct task		*task,
	void			*fsroot,
	cpu_type_t		cpu,
	boolean_t		is_64bit)
{
	kern_return_t		kr;
	vm_shared_region_t	shared_region;
	vm_map_offset_t		sr_address, sr_offset, target_address;
	vm_map_size_t		sr_size, mapping_size;
	vm_map_offset_t		sr_pmap_nesting_start;
	vm_map_size_t		sr_pmap_nesting_size;
	ipc_port_t		sr_handle;
	vm_prot_t		cur_prot, max_prot;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> "
		 "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit));

	/* lookup (create if needed) the shared region for this environment */
	shared_region = vm_shared_region_lookup(fsroot, cpu, is_64bit);
	if (shared_region == NULL) {
		/* this should not happen ! */
		SHARED_REGION_TRACE_ERROR(
			("shared_region: -> "
			 "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d): "
			 "lookup failed !\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit));
		//panic("shared_region_enter: lookup failed\n");
		return KERN_FAILURE;
	}

	/* let the task use that shared region */
	vm_shared_region_set(task, shared_region);

	kr = KERN_SUCCESS;
	/* no need to lock since this data is never modified */
	sr_address = shared_region->sr_base_address;
	sr_size = shared_region->sr_size;
	sr_handle = shared_region->sr_mem_entry;
	sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start;
	sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size;

	cur_prot = VM_PROT_READ;
#if __x86_64__
	/*
	 * XXX BINARY COMPATIBILITY
	 * java6 apparently needs to modify some code in the
	 * dyld shared cache and needs to be allowed to add
	 * write access...
	 */
	max_prot = VM_PROT_ALL;
#else /* __x86_64__ */
	max_prot = VM_PROT_READ;
#endif /* __x86_64__ */
	/*
	 * Start mapping the shared region's VM sub map into the task's VM map.
	 */
	sr_offset = 0;

	if (sr_pmap_nesting_start > sr_address) {
		/* we need to map a range without pmap-nesting first */
		target_address = sr_address;
		mapping_size = sr_pmap_nesting_start - sr_address;
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			VM_MAP_KERNEL_FLAGS_NONE,
			VM_KERN_MEMORY_NONE,
			sr_handle,
			sr_offset,
			TRUE,
			cur_prot,
			max_prot,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d): "
			 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
		sr_offset += mapping_size;
		sr_size -= mapping_size;
	}
	/*
	 * We may need to map several pmap-nested portions, due to platform
	 * specific restrictions on pmap nesting.
	 * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias...
	 */
	for (;
	     sr_pmap_nesting_size > 0;
	     sr_offset += mapping_size,
	     sr_size -= mapping_size,
	     sr_pmap_nesting_size -= mapping_size) {
		target_address = sr_address + sr_offset;
		mapping_size = sr_pmap_nesting_size;
		if (mapping_size > pmap_nesting_size_max) {
			mapping_size = (vm_map_offset_t) pmap_nesting_size_max;
		}
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			VM_MAP_KERNEL_FLAGS_NONE,
			VM_MEMORY_SHARED_PMAP,
			sr_handle,
			sr_offset,
			TRUE,
			cur_prot,
			max_prot,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d): "
			 "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
	}
	if (sr_size > 0) {
		/* and there's some left to be mapped without pmap-nesting */
		target_address = sr_address + sr_offset;
		mapping_size = sr_size;
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			VM_MAP_KERNEL_FLAGS_NONE,
			VM_KERN_MEMORY_NONE,
			sr_handle,
			sr_offset,
			TRUE,
			cur_prot,
			max_prot,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d): "
			 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
		sr_offset += mapping_size;
		sr_size -= mapping_size;
	}
	assert(sr_size == 0);

done:
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: enter(%p,%p,%p,%d,%d) <- 0x%x\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit, kr));
	return kr;
}
#define SANE_SLIDE_INFO_SIZE		(2560*1024) /*Can be changed if needed*/
struct vm_shared_region_slide_info	slide_info;
kern_return_t
vm_shared_region_sliding_valid(uint32_t slide)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_shared_region_t sr = vm_shared_region_get(current_task());

	/* No region yet? we're fine. */
	if (sr == NULL) {
		return kr;
	}

	if ((sr->sr_slid == TRUE) && slide) {
		if (slide != vm_shared_region_get_slide_info(sr)->slide) {
			printf("Only one shared region can be slid\n");
			kr = KERN_FAILURE;
		} else {
			/*
			 * Request for sliding when we've
			 * already done it with exactly the
			 * same slide value before.
			 * This isn't wrong technically but
			 * we don't want to slide again and
			 * so we return this value.
			 */
			kr = KERN_INVALID_ARGUMENT;
		}
	}
	vm_shared_region_deallocate(sr);
	return kr;
}
kern_return_t
vm_shared_region_slide_init(
	vm_shared_region_t	sr,
	mach_vm_size_t		slide_info_size,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	uint32_t		slide,
	memory_object_control_t	sr_file_control)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_object_t object = VM_OBJECT_NULL;
	vm_object_offset_t offset = 0;
	vm_shared_region_slide_info_t si = vm_shared_region_get_slide_info(sr);
	vm_offset_t slide_info_entry;

	vm_map_t map = NULL, cur_map = NULL;
	boolean_t is_map_locked = FALSE;

	assert(sr->sr_slide_in_progress);
	assert(!sr->sr_slid);
	assert(si->slide_object == NULL);
	assert(si->slide_info_entry == NULL);

	if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
		printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
		kr = KERN_FAILURE;
		return kr;
	}

	kr = kmem_alloc(kernel_map,
			(vm_offset_t *) &slide_info_entry,
			(vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (sr_file_control != MEMORY_OBJECT_CONTROL_NULL) {

		object = memory_object_control_to_vm_object(sr_file_control);
		vm_object_reference(object);
		offset = start;

		vm_object_lock(object);
	} else {
		/*
		 * Remove this entire "else" block and all "map" references
		 * once we get rid of the shared_region_slide_np()
		 * system call.
		 */
		vm_map_entry_t entry = VM_MAP_ENTRY_NULL;
		map = current_map();
		vm_map_lock_read(map);
		is_map_locked = TRUE;
	Retry:
		cur_map = map;
		if(!vm_map_lookup_entry(map, start, &entry)) {
			kr = KERN_INVALID_ARGUMENT;
		} else {
			vm_object_t shadow_obj = VM_OBJECT_NULL;

			if (entry->is_sub_map == TRUE) {
				map = VME_SUBMAP(entry);
				start -= entry->vme_start;
				start += VME_OFFSET(entry);
				vm_map_lock_read(map);
				vm_map_unlock_read(cur_map);
				goto Retry;
			} else {
				object = VME_OBJECT(entry);
				offset = ((start - entry->vme_start) +
					  VME_OFFSET(entry));
			}

			vm_object_lock(object);
			while (object->shadow != VM_OBJECT_NULL) {
				shadow_obj = object->shadow;
				vm_object_lock(shadow_obj);
				vm_object_unlock(object);
				object = shadow_obj;
			}
		}
	}

	if (object->internal == TRUE) {
		kr = KERN_INVALID_ADDRESS;
	} else if (object->object_slid) {
		/* Can only be slid once */
		printf("%s: found vm_object %p already slid?\n", __FUNCTION__, object);
		kr = KERN_FAILURE;
	} else {

		si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry;
		si->slide_info_size = slide_info_size;
		si->slide_object = object;
		si->start = offset;
		si->end = si->start + size;
		si->slide = slide;

		/*
		 * If we want to have this region get deallocated/freed
		 * then we will have to make sure that we msync(..MS_INVALIDATE..)
		 * the pages associated with this shared region. Those pages would
		 * have been slid with an older slide value.
		 */

		/*
		 * Pointers in object are held without references; they
		 * are disconnected at the time that we destroy the
		 * shared region, and since the shared region holds
		 * a reference on the object, no references in the other
		 * direction are required.
		 */
		object->object_slid = TRUE;
		object->vo_slide_info = si;
	}

	vm_object_unlock(object);
	if (is_map_locked == TRUE) {
		vm_map_unlock_read(map);
	}

	if (kr != KERN_SUCCESS) {
		kmem_free(kernel_map, slide_info_entry, slide_info_size);
	}
	return kr;
}
void*
vm_shared_region_get_slide_info_entry(vm_shared_region_t sr) {
	return (void*)sr->sr_slide_info.slide_info_entry;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v1(vm_shared_region_slide_info_entry_v1_t s_info)
{
	uint32_t pageIndex=0;
	uint16_t entryIndex=0;
	uint16_t *toc = NULL;

	toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
	for (;pageIndex < s_info->toc_count; pageIndex++) {

		entryIndex = (uint16_t)(toc[pageIndex]);

		if (entryIndex >= s_info->entry_count) {
			printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
			return KERN_FAILURE;
		}
	}
	return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v2(vm_shared_region_slide_info_entry_v2_t s_info, mach_vm_size_t slide_info_size)
{
	if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
		return KERN_FAILURE;
	}

	/* Ensure that the slide info doesn't reference any data outside of its bounds. */

	uint32_t page_starts_count = s_info->page_starts_count;
	uint32_t page_extras_count = s_info->page_extras_count;
	mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
	if (num_trailing_entries < page_starts_count) {
		return KERN_FAILURE;
	}

	/* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
	mach_vm_size_t trailing_size = num_trailing_entries << 1;
	if (trailing_size >> 1 != num_trailing_entries) {
		return KERN_FAILURE;
	}

	mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
	if (required_size < sizeof(*s_info)) {
		return KERN_FAILURE;
	}

	if (required_size > slide_info_size) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
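/*
 * Worked example for the bounds checks above (hypothetical counts):
 * if page_starts_count = 0xFFFF0000 and page_extras_count = 0x00020000,
 * their 32-bit sum wraps to 0x00010000, which is smaller than
 * page_starts_count, so the first check rejects the slide info.  For sane
 * counts, trailing_size is the entry count scaled to bytes (uint16_t
 * entries, hence "<< 1"), and required_size = header + trailing entries
 * must fit within the caller-supplied slide_info_size.
 */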
kern_return_t
vm_shared_region_slide_sanity_check(vm_shared_region_t sr)
{
	vm_shared_region_slide_info_t si;
	vm_shared_region_slide_info_entry_t s_info;
	kern_return_t kr;

	si = vm_shared_region_get_slide_info(sr);
	s_info = si->slide_info_entry;

	kr = mach_vm_protect(kernel_map,
			     (mach_vm_offset_t)(vm_offset_t)s_info,
			     (mach_vm_size_t) si->slide_info_size,
			     TRUE, VM_PROT_READ);
	if (kr != KERN_SUCCESS) {
		panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr);
	}

	if (s_info->version == 1) {
		kr = vm_shared_region_slide_sanity_check_v1(&s_info->v1);
	} else if (s_info->version == 2) {
		kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, si->slide_info_size);
	} else {
		goto fail;
	}
	if (kr != KERN_SUCCESS) {
		goto fail;
	}

	return KERN_SUCCESS;
fail:
	if (si->slide_info_entry != NULL) {
		kmem_free(kernel_map,
			  (vm_offset_t) si->slide_info_entry,
			  (vm_size_t) si->slide_info_size);

		vm_object_lock(si->slide_object);
		si->slide_object->object_slid = FALSE;
		si->slide_object->vo_slide_info = NULL;
		vm_object_unlock(si->slide_object);

		vm_object_deallocate(si->slide_object);
		si->slide_object = NULL;
	}

	si->slide_info_entry = NULL;
	si->slide_info_size = 0;

	return KERN_FAILURE;
}
1914 vm_shared_region_slide_page_v1(vm_shared_region_slide_info_t si
, vm_offset_t vaddr
, uint32_t pageIndex
)
1916 uint16_t *toc
= NULL
;
1917 slide_info_entry_toc_t bitmap
= NULL
;
1920 uint32_t slide
= si
->slide
;
1921 int is_64
= task_has_64BitAddr(current_task());
1923 vm_shared_region_slide_info_entry_v1_t s_info
= &si
->slide_info_entry
->v1
;
1924 toc
= (uint16_t*)((uintptr_t)s_info
+ s_info
->toc_offset
);
1926 if (pageIndex
>= s_info
->toc_count
) {
1927 printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex
, s_info
->toc_count
);
1929 uint16_t entryIndex
= (uint16_t)(toc
[pageIndex
]);
1930 slide_info_entry_toc_t slide_info_entries
= (slide_info_entry_toc_t
)((uintptr_t)s_info
+ s_info
->entry_offset
);
1932 if (entryIndex
>= s_info
->entry_count
) {
1933 printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex
, s_info
->entry_count
);
1935 bitmap
= &slide_info_entries
[entryIndex
];
1937 for(i
=0; i
< NUM_SLIDING_BITMAPS_PER_PAGE
; ++i
) {
1938 b
= bitmap
->entry
[i
];
1940 for (j
=0; j
<8; ++j
) {
1942 uint32_t *ptr_to_slide
;
1945 ptr_to_slide
= (uint32_t*)((uintptr_t)(vaddr
)+(sizeof(uint32_t)*(i
*8 +j
)));
1946 old_value
= *ptr_to_slide
;
1947 *ptr_to_slide
+= slide
;
1948 if (is_64
&& *ptr_to_slide
< old_value
) {
1950 * We just slid the low 32 bits of a 64-bit pointer
1951 * and it looks like there should have been a carry-over
1952 * to the upper 32 bits.
1953 * The sliding failed...
1955 printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n",
1956 i
, j
, b
, slide
, old_value
, *ptr_to_slide
);
1957 return KERN_FAILURE
;
1966 return KERN_SUCCESS
;
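/*
 * Illustration of the v1 bitmap walk above (hypothetical values): if
 * bitmap->entry[0] == 0x05, bits 0 and 2 are set, so the 32-bit words at
 * page offsets 0*4 and 2*4 each get "slide" added to them; a carry out of
 * the low 32 bits of a 64-bit pointer is treated as an error, since only
 * the low word is rebased here.
 */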
static kern_return_t
rebase_chain_32(
	uint8_t *page_content,
	uint16_t start_offset,
	uint32_t slide_amount,
	vm_shared_region_slide_info_entry_v2_t s_info)
{
	const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

	const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
	const uint32_t value_mask = ~delta_mask;
	const uint32_t value_add = (uint32_t)(s_info->value_add);
	const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

	uint32_t page_offset = start_offset;
	uint32_t delta = 1;

	while (delta != 0 && page_offset <= last_page_offset) {
		uint8_t *loc;
		uint32_t value;

		loc = page_content + page_offset;
		memcpy(&value, loc, sizeof(value));
		delta = (value & delta_mask) >> delta_shift;
		value &= value_mask;

		if (value != 0) {
			value += value_add;
			value += slide_amount;
		}

		memcpy(loc, &value, sizeof(value));
		page_offset += delta;
	}

	/* If the offset went past the end of the page, then the slide data is invalid. */
	if (page_offset > last_page_offset) {
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
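/*
 * Worked example of the shift arithmetic above (values assumed for
 * illustration only): if the v2 slide info carried delta_mask == 0x00C00000,
 * then __builtin_ctzll(0x00C00000) == 22, so delta_shift == 20. A word whose
 * masked delta bits are 0x00400000 yields delta == 0x00400000 >> 20 == 4,
 * i.e. the next fixup lives 4 bytes further into the page; subtracting 2 from
 * the count of trailing zeros is what converts the encoded 4-byte units into
 * a byte offset. A delta of 0 terminates the chain.
 */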
static kern_return_t
rebase_chain_64(
	uint8_t *page_content,
	uint16_t start_offset,
	uint32_t slide_amount,
	vm_shared_region_slide_info_entry_v2_t s_info)
{
	const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint64_t);

	const uint64_t delta_mask = s_info->delta_mask;
	const uint64_t value_mask = ~delta_mask;
	const uint64_t value_add = s_info->value_add;
	const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;

	uint32_t page_offset = start_offset;
	uint32_t delta = 1;

	while (delta != 0 && page_offset <= last_page_offset) {
		uint8_t *loc;
		uint64_t value;

		loc = page_content + page_offset;
		memcpy(&value, loc, sizeof(value));
		delta = (uint32_t)((value & delta_mask) >> delta_shift);
		value &= value_mask;

		if (value != 0) {
			value += value_add;
			value += slide_amount;
		}

		memcpy(loc, &value, sizeof(value));
		page_offset += delta;
	}

	if (page_offset + sizeof(uint32_t) == PAGE_SIZE_FOR_SR_SLIDE) {
		/* If a pointer straddling the page boundary needs to be adjusted, then
		 * add the slide to the lower half. The encoding guarantees that the upper
		 * half on the next page will need no masking.
		 *
		 * This assumes a little-endian machine and that the region being slid
		 * never crosses a 4 GB boundary. */

		uint8_t *loc = page_content + page_offset;
		uint32_t value;

		memcpy(&value, loc, sizeof(value));
		value += slide_amount;
		memcpy(loc, &value, sizeof(value));
	} else if (page_offset > last_page_offset) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
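/*
 * Minimal user-space sketch of the same chain walk (illustration only; the
 * stand-alone form, constants and function name are assumptions, not kernel
 * code). It shows the core of what rebase_chain_64() does: follow embedded
 * deltas from fixup to fixup, adding the slide to each non-zero target.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	#define PAGE_SZ 4096
 *
 *	static int walk(uint8_t *page, uint16_t start, uint64_t delta_mask,
 *			uint64_t value_add, uint32_t slide_amount)
 *	{
 *		const uint32_t last = PAGE_SZ - sizeof(uint64_t);
 *		const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;
 *		uint32_t off = start, delta = 1;
 *		while (delta != 0 && off <= last) {
 *			uint64_t v;
 *			memcpy(&v, page + off, sizeof(v));
 *			delta = (uint32_t)((v & delta_mask) >> delta_shift);
 *			v &= ~delta_mask;
 *			if (v != 0)
 *				v += value_add + slide_amount;
 *			memcpy(page + off, &v, sizeof(v));
 *			off += delta;
 *		}
 *		return (off > last) ? -1 : 0;  // sketch omits the straddling case
 *	}
 */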
static kern_return_t
rebase_chain(
	boolean_t is_64,
	uint32_t pageIndex,
	uint8_t *page_content,
	uint16_t start_offset,
	uint32_t slide_amount,
	vm_shared_region_slide_info_entry_v2_t s_info)
{
	kern_return_t kr;

	if (is_64) {
		kr = rebase_chain_64(page_content, start_offset, slide_amount, s_info);
	} else {
		kr = rebase_chain_32(page_content, start_offset, slide_amount, s_info);
	}

	if (kr != KERN_SUCCESS) {
		printf("vm_shared_region_slide_page() offset overflow: pageIndex=%u, start_offset=%u, slide_amount=%u\n",
		       pageIndex, start_offset, slide_amount);
	}
	return kr;
}
static kern_return_t
vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
	vm_shared_region_slide_info_entry_v2_t s_info = &si->slide_info_entry->v2;
	const uint32_t slide_amount = si->slide;

	/* The high bits of the delta_mask field are nonzero precisely when the shared
	 * cache is 64-bit. */
	const boolean_t is_64 = (s_info->delta_mask >> 32) != 0;

	const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
	const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

	uint8_t *page_content = (uint8_t *)vaddr;
	uint16_t page_entry;

	if (pageIndex >= s_info->page_starts_count) {
		printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
		       pageIndex, s_info->page_starts_count);
		return KERN_FAILURE;
	}
	page_entry = page_starts[pageIndex];

	if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
		return KERN_SUCCESS;
	}

	if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) {
		uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE;
		uint16_t info;

		do {
			uint16_t page_start_offset;
			kern_return_t kr;

			if (chain_index >= s_info->page_extras_count) {
				printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
				       chain_index, s_info->page_extras_count);
				return KERN_FAILURE;
			}
			info = page_extras[chain_index];
			page_start_offset = (info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;

			kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
			if (kr != KERN_SUCCESS) {
				return KERN_FAILURE;
			}

			chain_index++;
		} while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END));
	} else {
		const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
		kern_return_t kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
		if (kr != KERN_SUCCESS) {
			return KERN_FAILURE;
		}
	}

	return KERN_SUCCESS;
}
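/*
 * Encoding note with a made-up example (illustration only, as the code above
 * reads the format): each page_starts[] entry is either
 * DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE, a chain start offset, or an index
 * into page_extras[] when DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA is set. Assuming
 * DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT == 2, a hypothetical
 * page_starts[pageIndex] == 0x0008 starts a single chain at byte offset
 * 0x0008 << 2 == 0x20, while an entry with the EXTRA bit set runs one chain
 * per page_extras[] entry until an entry carrying
 * DYLD_CACHE_SLIDE_PAGE_ATTR_END is reached, exactly as the do/while loop
 * above does.
 */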
kern_return_t
vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
	if (si->slide_info_entry->version == 1) {
		return vm_shared_region_slide_page_v1(si, vaddr, pageIndex);
	} else {
		return vm_shared_region_slide_page_v2(si, vaddr, pageIndex);
	}
}
/******************************************************************************/
/* Comm page support                                                          */
/******************************************************************************/

ipc_port_t commpage32_handle = IPC_PORT_NULL;
ipc_port_t commpage64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage32_entry = NULL;
vm_named_entry_t commpage64_entry = NULL;
vm_map_t commpage32_map = VM_MAP_NULL;
vm_map_t commpage64_map = VM_MAP_NULL;

ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage_text32_entry = NULL;
vm_named_entry_t commpage_text64_entry = NULL;
vm_map_t commpage_text32_map = VM_MAP_NULL;
vm_map_t commpage_text64_map = VM_MAP_NULL;

user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START;
user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START;
#if defined(__i386__) || defined(__x86_64__)
/*
 * Create a memory entry, VM submap and pmap for one commpage.
 */
static void
_vm_commpage_init(
	ipc_port_t	*handlep,
	vm_map_size_t	size)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_map_t		new_map;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> _init(0x%llx)\n",
		 (long long)size));

	kr = mach_memory_entry_allocate(&mem_entry,
					handlep);
	if (kr != KERN_SUCCESS) {
		panic("_vm_commpage_init: could not allocate mem_entry");
	}
	new_map = vm_map_create(pmap_create(NULL, 0, 0), 0, size, TRUE);
	if (new_map == VM_MAP_NULL) {
		panic("_vm_commpage_init: could not allocate VM map");
	}
	mem_entry->backing.map = new_map;
	mem_entry->internal = TRUE;
	mem_entry->is_sub_map = TRUE;
	mem_entry->offset = 0;
	mem_entry->protection = VM_PROT_ALL;
	mem_entry->size = size;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: _init(0x%llx) <- %p\n",
		 (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep)));
}
#endif /* __i386__ || __x86_64__ */
/*
 * Initialize the comm text pages at boot time.
 */
extern u_int32_t random(void);
void
vm_commpage_text_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: ->init()\n"));
#if defined(__i386__) || defined(__x86_64__)
	/* create the 32-bit comm text page */
	unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restricting to 32bMAX-2PAGE */
	_vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject;
	commpage_text32_map = commpage_text32_entry->backing.map;
	commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm text page */
	offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding up to a 2 MB range */
	_vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject;
	commpage_text64_map = commpage_text64_entry->backing.map;
	commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);

	commpage_text_populate();
#elif defined(__arm64__) || defined(__arm__)
#else
#error Unknown architecture.
#endif /* __i386__ || __x86_64__ */
	/* populate the routines in here */
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: init() <-\n"));
}
/*
 * Initialize the comm pages at boot time.
 */
void
vm_commpage_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> init()\n"));

#if defined(__i386__) || defined(__x86_64__)
	/* create the 32-bit comm page */
	_vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
	commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
	commpage32_map = commpage32_entry->backing.map;

	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm page */
	_vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
	commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
	commpage64_map = commpage64_entry->backing.map;

#endif /* __i386__ || __x86_64__ */

	/* populate them according to this specific platform */
	commpage_populate();
	__commpage_setup = 1;
#if defined(__i386__) || defined(__x86_64__)
	if (__system_power_source == 0) {
		post_sys_powersource_internal(0, 1);
	}
#endif /* __i386__ || __x86_64__ */

	SHARED_REGION_TRACE_DEBUG(
		("commpage: init() <-\n"));
}
/*
 * Enter the appropriate comm page into the task's address space.
 * This is called at exec() time via vm_map_exec().
 */
kern_return_t
vm_commpage_enter(
	vm_map_t	map,
	task_t		task,
	boolean_t	is64bit)
{
#if defined(__arm__)
#pragma unused(is64bit)
	(void)map;
	(void)task;
	return KERN_SUCCESS;
#elif defined(__arm64__)
#pragma unused(is64bit)
	(void)task;
	pmap_insert_sharedpage(vm_map_pmap(map));
	return KERN_SUCCESS;
#else
	ipc_port_t		commpage_handle, commpage_text_handle;
	vm_map_offset_t		commpage_address, objc_address, commpage_text_address;
	vm_map_size_t		commpage_size, objc_size, commpage_text_size;
	int			vm_flags;
	vm_map_kernel_flags_t	vmk_flags;
	kern_return_t		kr;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> enter(%p,%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task)));

	commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
	/* the comm page is likely to be beyond the actual end of the VM map */
	vm_flags = VM_FLAGS_FIXED;
	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_beyond_max = TRUE;

	/* select the appropriate comm page for this task */
	assert(!(is64bit ^ vm_map_is_64bit(map)));
	if (is64bit) {
		commpage_handle = commpage64_handle;
		commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
		commpage_size = _COMM_PAGE64_AREA_LENGTH;
		objc_size = _COMM_PAGE64_OBJC_SIZE;
		objc_address = _COMM_PAGE64_OBJC_BASE;
		commpage_text_handle = commpage_text64_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text64_location;
	} else {
		commpage_handle = commpage32_handle;
		commpage_address =
			(vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
		commpage_size = _COMM_PAGE32_AREA_LENGTH;
		objc_size = _COMM_PAGE32_OBJC_SIZE;
		objc_address = _COMM_PAGE32_OBJC_BASE;
		commpage_text_handle = commpage_text32_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text32_location;
	}

	vm_tag_t tag = VM_KERN_MEMORY_NONE;
	if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
	    (commpage_size & (pmap_nesting_size_min - 1)) == 0) {
		/* the commpage is properly aligned or sized for pmap-nesting */
		tag = VM_MEMORY_SHARED_PMAP;
	}
	/* map the comm page in the task's address space */
	assert(commpage_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_address,
		commpage_size,
		0,
		vm_flags,
		vmk_flags,
		tag,
		commpage_handle,
		0,
		FALSE,
		VM_PROT_READ,
		VM_PROT_READ,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage: enter(%p,0x%llx,0x%llx) "
			 "commpage %p mapping failed 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (long long)commpage_address,
			 (long long)commpage_size,
			 (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr));
	}

	/* map the comm text page in the task's address space */
	assert(commpage_text_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_text_address,
		commpage_text_size,
		0,
		vm_flags,
		vmk_flags,
		tag,
		commpage_text_handle,
		0,
		FALSE,
		VM_PROT_READ|VM_PROT_EXECUTE,
		VM_PROT_READ|VM_PROT_EXECUTE,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage text: enter(%p,0x%llx,0x%llx) "
			 "commpage text %p mapping failed 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (long long)commpage_text_address,
			 (long long)commpage_text_size,
			 (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr));
	}

	/*
	 * Since we're here, we also pre-allocate some virtual space for the
	 * Objective-C run-time, if needed...
	 */
	if (objc_size != 0) {
		kr = vm_map_enter_mem_object(
			map,
			&objc_address,
			objc_size,
			0,
			VM_FLAGS_FIXED,
			vmk_flags,
			tag,
			IPC_PORT_NULL,
			0,
			FALSE,
			VM_PROT_ALL,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("commpage: enter(%p,0x%llx,0x%llx) "
				 "objc mapping failed 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (long long)objc_address,
				 (long long)objc_size, kr));
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("commpage: enter(%p,%p) <- 0x%x\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task), kr));
	return kr;
#endif
}
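/*
 * Illustration of the nesting check above (values assumed): with a
 * hypothetical pmap_nesting_size_min of 0x200000 (2 MB), both the commpage
 * address and size must have their low 21 bits clear, e.g.
 * (0x7fffffe00000 & 0x1fffff) == 0, for the mapping to be tagged
 * VM_MEMORY_SHARED_PMAP and so be a candidate for pmap sharing; otherwise it
 * is entered as an ordinary fixed mapping with tag VM_KERN_MEMORY_NONE.
 */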
kern_return_t
vm_shared_region_slide(uint32_t slide,
	mach_vm_offset_t	entry_start_address,
	mach_vm_size_t		entry_size,
	mach_vm_offset_t	slide_start,
	mach_vm_size_t		slide_size,
	memory_object_control_t	sr_file_control)
{
	void *slide_info_entry = NULL;
	int error;
	vm_shared_region_t sr;

	SHARED_REGION_TRACE_DEBUG(
		("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
		 slide, entry_start_address, entry_size, slide_start, slide_size));

	sr = vm_shared_region_get(current_task());
	if (sr == NULL) {
		printf("%s: no shared region?\n", __FUNCTION__);
		SHARED_REGION_TRACE_DEBUG(
			("vm_shared_region_slide: <- %d (no shared region)\n",
			 KERN_FAILURE));
		return KERN_FAILURE;
	}

	/*
	 * Protect from concurrent access.
	 */
	vm_shared_region_lock();
	while (sr->sr_slide_in_progress) {
		vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
	}
	if (sr->sr_slid
#ifndef CONFIG_EMBEDDED
	    || shared_region_completed_slide
#endif
	    ) {
		vm_shared_region_unlock();

		vm_shared_region_deallocate(sr);
		printf("%s: shared region already slid?\n", __FUNCTION__);
		SHARED_REGION_TRACE_DEBUG(
			("vm_shared_region_slide: <- %d (already slid)\n",
			 KERN_FAILURE));
		return KERN_FAILURE;
	}

	sr->sr_slide_in_progress = TRUE;
	vm_shared_region_unlock();

	if ((error = vm_shared_region_slide_init(sr, slide_size, entry_start_address, entry_size, slide, sr_file_control))) {
		printf("slide_info initialization failed with kr=%d\n", error);
		goto done;
	}

	slide_info_entry = vm_shared_region_get_slide_info_entry(sr);
	if (slide_info_entry == NULL) {
		error = KERN_FAILURE;
	} else {
		error = copyin((user_addr_t)slide_start,
			       slide_info_entry,
			       (vm_size_t)slide_size);
		if (error) {
			error = KERN_INVALID_ADDRESS;
		}
	}
	if (error) {
		goto done;
	}

	if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) {
		error = KERN_INVALID_ARGUMENT;
		printf("Sanity Check failed for slide_info\n");
	} else {
#if DEBUG
		printf("Successfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n",
		       (void*)(uintptr_t)entry_start_address,
		       (unsigned long)entry_size,
		       (unsigned long)slide_size);
#endif
	}
done:
	vm_shared_region_lock();

	assert(sr->sr_slide_in_progress);
	assert(sr->sr_slid == FALSE);
	sr->sr_slide_in_progress = FALSE;
	thread_wakeup(&sr->sr_slide_in_progress);

	if (error == KERN_SUCCESS) {
		sr->sr_slid = TRUE;

		/*
		 * We don't know how to tear down a slid shared region today, because
		 * we would have to invalidate all the pages that have been slid
		 * atomically with respect to anyone mapping the shared region afresh.
		 * Therefore, take a dangling reference to prevent teardown.
		 */
		sr->sr_ref_count++;
#ifndef CONFIG_EMBEDDED
		shared_region_completed_slide = TRUE;
#endif
	}
	vm_shared_region_unlock();

	vm_shared_region_deallocate(sr);

	SHARED_REGION_TRACE_DEBUG(
		("vm_shared_region_slide: <- %d\n",
		 error));

	return error;
}
/*
 * This is called from powermanagement code to let kernel know the current source of power.
 * 0 if it is external source (connected to power)
 * 1 if it is internal power source, i.e. battery
 */
void
#if defined(__i386__) || defined(__x86_64__)
post_sys_powersource(int i)
#else
post_sys_powersource(__unused int i)
#endif
{
#if defined(__i386__) || defined(__x86_64__)
	post_sys_powersource_internal(i, 0);
#endif /* __i386__ || __x86_64__ */
}


#if defined(__i386__) || defined(__x86_64__)
static void
post_sys_powersource_internal(int i, int internal)
{
	if (internal == 0)
		__system_power_source = i;

	if (__commpage_setup != 0) {
		if (__system_power_source != 0)
			commpage_set_spin_count(0);
		else
			commpage_set_spin_count(MP_SPIN_TRIES);
	}
}
#endif /* __i386__ || __x86_64__ */
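/*
 * Usage sketch (hypothetical caller, illustration only): power management
 * would report a switch to battery with post_sys_powersource(1) and a return
 * to external power with post_sys_powersource(0). On i386/x86_64 this updates
 * the commpage spin count (0 on battery, MP_SPIN_TRIES on external power)
 * once the commpage has been set up; on other architectures the call is a
 * no-op.
 */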