/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Shared region (... and comm page)
 *
 * This file handles the VM shared region and comm page.
 *
 *
 * A shared region is a submap that contains the most common system shared
 * libraries for a given environment.
 * An environment is defined by (cpu-type, 64-bitness, root directory).
 *
 * The point of a shared region is to reduce the setup overhead when exec'ing
 * a new process.
 * A shared region uses a shared VM submap that gets mapped automatically
 * at exec() time (see vm_map_exec()). The first process of a given
 * environment sets up the shared region and all further processes in that
 * environment can re-use that shared region without having to re-create
 * the same mappings in their VM map. All they need is contained in the shared
 * region.
 * It can also share a pmap (mostly for read-only parts but also for the
 * initial version of some writable parts), which gets "nested" into the
 * process's pmap. This reduces the number of soft faults: once one process
 * brings in a page in the shared region, all the other processes can access
 * it without having to enter it in their own pmap.
 *
 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
 * to map the appropriate shared region in the process's address space.
 * We look up the appropriate shared region for the process's environment.
 * If we can't find one, we create a new (empty) one and add it to the list.
 * Otherwise, we just take an extra reference on the shared region we found.
 *
 * The "dyld" runtime (mapped into the process's address space at exec() time)
 * will then use the shared_region_check_np() and shared_region_map_np()
 * system calls to validate and/or populate the shared region with the
 * appropriate dyld_shared_cache file.
 *
 * The shared region is inherited on fork() and the child simply takes an
 * extra reference on its parent's shared region.
 *
 * When the task terminates, we release a reference on its shared region.
 * When the last reference is released, we destroy the shared region.
 *
 * After a chroot(), the calling process keeps using its original shared region,
 * since that's what was mapped when it was started. But its children
 * will use a different shared region, because they need to use the shared
 * cache that's relative to the new root directory.
 *
 *
 * A "comm page" is an area of memory that is populated by the kernel with
 * the appropriate platform-specific version of some commonly used code.
 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
 * for the native cpu-type. No need to overly optimize translated code
 * for hardware that is not really there!
 *
 * The comm pages are created and populated at boot time.
 *
 * The appropriate comm page is mapped into a process's address space
 * at exec() time, in vm_map_exec().
 * It is then inherited on fork().
 *
 * The comm page is shared between the kernel and all applications of
 * a given platform. Only the kernel can modify it.
 *
 * Applications just branch to fixed addresses in the comm page and find
 * the right version of the code for the platform. There is also some
 * data provided and updated by the kernel for processes to retrieve easily
 * without having to do a system call.
 */
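/*
 * Illustrative userland sketch (not part of this kernel file): reading one of
 * the values the kernel keeps up to date in the comm page, without a system
 * call.  The _COMM_PAGE_NCPUS offset is exported by
 * <machine/cpu_capabilities.h>; treat the exact symbol chosen here as an
 * assumption made for the sake of the example.
 */
#if 0	/* example only, never compiled */
#include <machine/cpu_capabilities.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* the comm page is mapped at a fixed address in every process */
	uint8_t ncpus = *(volatile uint8_t *)_COMM_PAGE_NCPUS;
	printf("configured cpus: %u\n", ncpus);
	return 0;
}
#endif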
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>

#include <vm/vm_protos.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data_internal.h>
#endif
/*
 * the following codes are used in the subclass
 * of the DBG_MACH_SHAREDREGION class
 */
#define PROCESS_SHARED_CACHE_LAYOUT 0x00
/* "dyld" uses this to figure out what the kernel supports */
int shared_region_version = 3;

/* trace level, output is sent to the system log file */
int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;

/* should local (non-chroot) shared regions persist when no task uses them ? */
int shared_region_persistence = 0;	/* no by default */

/* delay before reclaiming an unused shared region */
int shared_region_destroy_delay = 120;	/* in seconds */

struct vm_shared_region *init_task_shared_region = NULL;

#ifndef CONFIG_EMBEDDED
/*
 * Only one cache gets to slide on Desktop, since we can't
 * tear down slide info properly today and the desktop actually
 * produces lots of shared caches.
 */
boolean_t shared_region_completed_slide = FALSE;
#endif
/* this lock protects all the shared region data structures */
lck_grp_t *vm_shared_region_lck_grp;
lck_mtx_t vm_shared_region_lock;

#define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
#define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
#define vm_shared_region_sleep(event, interruptible)		\
	lck_mtx_sleep(&vm_shared_region_lock,			\
		      LCK_SLEEP_DEFAULT,			\
		      (event_t) (event),			\
		      (interruptible))
/* the list of currently available shared regions (one per environment) */
queue_head_t vm_shared_region_queue;
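/*
 * Locking conventions in this file:
 * - vm_shared_region_lock protects the queue above and every shared
 *   region's reference count and flags.
 * - longer operations (mapping or sliding a shared cache) are serialized
 *   per shared region via the sr_mapping_in_progress / sr_slide_in_progress
 *   flags: the worker sets the flag, drops the lock while it works, and
 *   wakes up waiters blocked in vm_shared_region_sleep() when it is done.
 */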
static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
static vm_shared_region_t vm_shared_region_create(
	void			*root_dir,
	cpu_type_t		cputype,
	cpu_subtype_t		cpu_subtype,
	boolean_t		is_64bit);
static void vm_shared_region_destroy(vm_shared_region_t shared_region);

static void vm_shared_region_timeout(thread_call_param_t param0,
				     thread_call_param_t param1);
kern_return_t vm_shared_region_slide_mapping(
	vm_shared_region_t	sr,
	mach_vm_size_t		slide_info_size,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	mach_vm_offset_t	slid_mapping,
	uint32_t		slide,
	memory_object_control_t); /* forward */

static int __commpage_setup = 0;
#if defined(__i386__) || defined(__x86_64__)
static int __system_power_source = 1;	/* init to external power source */
static void post_sys_powersource_internal(int i, int internal);
#endif /* __i386__ || __x86_64__ */
/*
 * Initialize the module...
 */
void
vm_shared_region_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> init\n"));

	vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region",
						      LCK_GRP_ATTR_NULL);
	lck_mtx_init(&vm_shared_region_lock,
		     vm_shared_region_lck_grp,
		     LCK_ATTR_NULL);

	queue_init(&vm_shared_region_queue);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: <- init\n"));
}
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 */
vm_shared_region_t
vm_shared_region_get(
	task_t		task)
{
	vm_shared_region_t	shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> get(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(task)));

	vm_shared_region_lock();
	shared_region = task->shared_region;
	if (shared_region) {
		assert(shared_region->sr_ref_count > 0);
		vm_shared_region_reference_locked(shared_region);
	}
	vm_shared_region_unlock();

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: get(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	return shared_region;
}
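/*
 * Typical caller pattern (illustrative sketch only, not taken from this
 * file): take a reference with vm_shared_region_get(), use the region, then
 * consume the reference with vm_shared_region_deallocate().
 */
#if 0	/* example only, not compiled */
static void
shared_region_usage_example(void)
{
	vm_shared_region_t	sr;
	mach_vm_offset_t	base;

	sr = vm_shared_region_get(current_task());
	if (sr != NULL) {
		base = vm_shared_region_base_address(sr);
		printf("shared region base: 0x%llx\n", (long long)base);
		/* consume the extra reference taken by vm_shared_region_get() */
		vm_shared_region_deallocate(sr);
	}
}
#endif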
/*
 * Get the base address of the shared region.
 * That's the address at which it needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_offset_t
vm_shared_region_base_address(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> base_address(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: base_address(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_base_address));
	return shared_region->sr_base_address;
}
/*
 * Get the size of the shared region.
 * That's the size that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_size_t
vm_shared_region_size(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> size(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: size(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_size));
	return shared_region->sr_size;
}
/*
 * Get the memory entry of the shared region.
 * That's the "memory object" that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
ipc_port_t
vm_shared_region_mem_entry(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> mem_entry(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: mem_entry(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry)));
	return shared_region->sr_mem_entry;
}
vm_map_t
vm_shared_region_vm_map(
	vm_shared_region_t	shared_region)
{
	ipc_port_t		sr_handle;
	vm_named_entry_t	sr_mem_entry;
	vm_map_t		sr_map;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> vm_map(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);

	sr_handle = shared_region->sr_mem_entry;
	sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
	sr_map = sr_mem_entry->backing.map;
	assert(sr_mem_entry->is_sub_map);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: vm_map(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(sr_map)));
	return sr_map;
}
uint32_t
vm_shared_region_get_slide(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> vm_shared_region_get_slide(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: vm_shared_region_get_slide(%p) <- %u\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_slide_info.slide));

	/* 0 if we haven't slid */
	assert(shared_region->sr_slide_info.slide_object != NULL ||
	       shared_region->sr_slide_info.slide == 0);

	return shared_region->sr_slide_info.slide;
}
vm_shared_region_slide_info_t
vm_shared_region_get_slide_info(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> vm_shared_region_get_slide_info(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info)));
	return &shared_region->sr_slide_info;
}
/*
 * Set the shared region the process should use.
 * A NULL new shared region means that we just want to release the old
 * shared region.
 * The caller should already have an extra reference on the new shared region
 * (if any). We release a reference on the old shared region (if any).
 */
void
vm_shared_region_set(
	task_t			task,
	vm_shared_region_t	new_shared_region)
{
	vm_shared_region_t	old_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> set(%p, %p)\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(new_shared_region)));

	vm_shared_region_lock();

	old_shared_region = task->shared_region;
	if (new_shared_region) {
		assert(new_shared_region->sr_ref_count > 0);
	}

	task->shared_region = new_shared_region;

	vm_shared_region_unlock();

	if (old_shared_region) {
		assert(old_shared_region->sr_ref_count > 0);
		vm_shared_region_deallocate(old_shared_region);
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: set(%p) <- old=%p new=%p\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(old_shared_region),
		 (void *)VM_KERNEL_ADDRPERM(new_shared_region)));
}
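/*
 * Note on the lookup below: a matching shared region may be created by
 * another process racing with us.  We optimistically create a new region
 * outside the lock, then re-check the queue; if someone else won the race,
 * our freshly created region is destroyed without ever being used.
 */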
/*
 * Lookup up the shared region for the desired environment.
 * If none is found, create a new (empty) one.
 * Grab an extra reference on the returned shared region, to make sure
 * it doesn't get destroyed before the caller is done with it. The caller
 * is responsible for consuming that extra reference if necessary.
 */
vm_shared_region_t
vm_shared_region_lookup(
	void		*root_dir,
	cpu_type_t	cputype,
	cpu_subtype_t	cpu_subtype,
	boolean_t	is_64bit)
{
	vm_shared_region_t	shared_region;
	vm_shared_region_t	new_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> lookup(root=%p,cpu=<%d,%d>,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir),
		 cputype, cpu_subtype, is_64bit));

	shared_region = NULL;
	new_shared_region = NULL;

	vm_shared_region_lock();
	for (;;) {
		/* check if we already have one for that environment */
		queue_iterate(&vm_shared_region_queue,
			      shared_region,
			      vm_shared_region_t,
			      sr_q) {
			assert(shared_region->sr_ref_count > 0);
			if (shared_region->sr_cpu_type == cputype &&
			    shared_region->sr_cpu_subtype == cpu_subtype &&
			    shared_region->sr_root_dir == root_dir &&
			    shared_region->sr_64bit == is_64bit) {
				/* found a match ! */
				vm_shared_region_reference_locked(shared_region);
				goto done;
			}
		}
		if (new_shared_region == NULL) {
			/* no match: create a new one */
			vm_shared_region_unlock();
			new_shared_region = vm_shared_region_create(root_dir,
								    cputype,
								    cpu_subtype,
								    is_64bit);
			/* do the lookup again, in case we lost a race */
			vm_shared_region_lock();
		} else {
			/* still no match: use our new one */
			shared_region = new_shared_region;
			new_shared_region = NULL;
			queue_enter(&vm_shared_region_queue,
				    shared_region,
				    vm_shared_region_t,
				    sr_q);
			break;
		}
	}

done:
	vm_shared_region_unlock();

	if (new_shared_region) {
		/*
		 * We lost a race with someone else to create a new shared
		 * region for that environment. Get rid of our unused one.
		 */
		assert(new_shared_region->sr_ref_count == 1);
		new_shared_region->sr_ref_count--;
		vm_shared_region_destroy(new_shared_region);
		new_shared_region = NULL;
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: lookup(root=%p,cpu=<%d,%d>,64bit=%d) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir),
		 cputype, cpu_subtype, is_64bit,
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	assert(shared_region->sr_ref_count > 0);
	return shared_region;
}
/*
 * Take an extra reference on a shared region.
 * The vm_shared_region_lock should already be held by the caller.
 */
static void
vm_shared_region_reference_locked(
	vm_shared_region_t	shared_region)
{
	LCK_MTX_ASSERT(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> reference_locked(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count++;

	if (shared_region->sr_timer_call != NULL) {
		boolean_t cancelled;

		/* cancel and free any pending timeout */
		cancelled = thread_call_cancel(shared_region->sr_timer_call);
		if (cancelled) {
			thread_call_free(shared_region->sr_timer_call);
			shared_region->sr_timer_call = NULL;
			/* release the reference held by the cancelled timer */
			shared_region->sr_ref_count--;
		} else {
			/* the timer will drop the reference and free itself */
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: reference_locked(%p) <- %d\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_ref_count));
}
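/*
 * Note on the release path below: dropping the last reference does not
 * destroy the shared region immediately.  A thread call is armed instead
 * (holding one reference) and the region is only torn down if nobody
 * re-uses it within shared_region_destroy_delay seconds; a new user
 * cancels the pending timer in vm_shared_region_reference_locked() above.
 */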
/*
 * Release a reference on the shared region.
 * Destroy it if there are no references left.
 */
void
vm_shared_region_deallocate(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> deallocate(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	vm_shared_region_lock();

	assert(shared_region->sr_ref_count > 0);

	if (shared_region->sr_root_dir == NULL) {
		/*
		 * Local (i.e. based on the boot volume) shared regions
		 * can persist or not based on the "shared_region_persistence"
		 * sysctl.
		 * Make sure that this one complies.
		 *
		 * See comments in vm_shared_region_slide() for notes about
		 * shared regions we have slid (which are not torn down currently).
		 */
		if (shared_region_persistence &&
		    !shared_region->sr_persists) {
			/* make this one persistent */
			shared_region->sr_ref_count++;
			shared_region->sr_persists = TRUE;
		} else if (!shared_region_persistence &&
			   shared_region->sr_persists) {
			/* make this one no longer persistent */
			assert(shared_region->sr_ref_count > 1);
			shared_region->sr_ref_count--;
			shared_region->sr_persists = FALSE;
		}
	}

	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count--;
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p): ref now %d\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_ref_count));

	if (shared_region->sr_ref_count == 0) {
		uint64_t deadline;

		assert(!shared_region->sr_slid);

		if (shared_region->sr_timer_call == NULL) {
			/* hold one reference for the timer */
			assert(!shared_region->sr_mapping_in_progress);
			shared_region->sr_ref_count++;

			/* set up the timer */
			shared_region->sr_timer_call = thread_call_allocate(
				(thread_call_func_t) vm_shared_region_timeout,
				(thread_call_param_t) shared_region);

			/* schedule the timer */
			clock_interval_to_deadline(shared_region_destroy_delay,
						   1000 * 1000 * 1000,	/* seconds -> nanoseconds */
						   &deadline);
			thread_call_enter_delayed(shared_region->sr_timer_call,
						  deadline);

			SHARED_REGION_TRACE_DEBUG(
				("shared_region: deallocate(%p): armed timer\n",
				 (void *)VM_KERNEL_ADDRPERM(shared_region)));

			vm_shared_region_unlock();
		} else {
			/* timer expired: let go of this shared region */

			/*
			 * We can't properly handle teardown of a slid object today.
			 */
			assert(!shared_region->sr_slid);

			/*
			 * Remove it from the queue first, so no one can find
			 * it...
			 */
			queue_remove(&vm_shared_region_queue,
				     shared_region,
				     vm_shared_region_t,
				     sr_q);
			vm_shared_region_unlock();

			/* ... and destroy it */
			vm_shared_region_destroy(shared_region);
			shared_region = NULL;
		}
	} else {
		vm_shared_region_unlock();
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p) <-\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
}
static void
vm_shared_region_timeout(
	thread_call_param_t	param0,
	__unused thread_call_param_t	param1)
{
	vm_shared_region_t	shared_region;

	shared_region = (vm_shared_region_t) param0;

	vm_shared_region_deallocate(shared_region);
}
/*
 * Create a new (empty) shared region for a new environment.
 */
static vm_shared_region_t
vm_shared_region_create(
	void			*root_dir,
	cpu_type_t		cputype,
	cpu_subtype_t		cpu_subtype,
	boolean_t		is_64bit)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	ipc_port_t		mem_entry_port;
	vm_shared_region_t	shared_region;
	vm_shared_region_slide_info_t si;
	vm_map_t		sub_map;
	mach_vm_offset_t	base_address, pmap_nesting_start;
	mach_vm_size_t		size, pmap_nesting_size;

	SHARED_REGION_TRACE_INFO(
		("shared_region: -> create(root=%p,cpu=<%d,%d>,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir),
		 cputype, cpu_subtype, is_64bit));

	mem_entry_port = IPC_PORT_NULL;
	sub_map = VM_MAP_NULL;

	/* create a new shared region structure... */
	shared_region = kalloc(sizeof (*shared_region));
	if (shared_region == NULL) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: couldn't allocate\n"));
		goto done;
	}

	/* figure out the correct settings for the desired environment */
	if (is_64bit) {
		switch (cputype) {
#if defined(__arm64__)
		case CPU_TYPE_ARM64:
			base_address = SHARED_REGION_BASE_ARM64;
			size = SHARED_REGION_SIZE_ARM64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM64;
			break;
#elif !defined(__arm__)
		case CPU_TYPE_X86_64:
			base_address = SHARED_REGION_BASE_X86_64;
			size = SHARED_REGION_SIZE_X86_64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC64;
			size = SHARED_REGION_SIZE_PPC64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
			break;
#endif
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				 cputype));
			kfree(shared_region, sizeof (*shared_region));
			shared_region = NULL;
			goto done;
		}
	} else {
		switch (cputype) {
#if defined(__arm__) || defined(__arm64__)
		case CPU_TYPE_ARM:
			base_address = SHARED_REGION_BASE_ARM;
			size = SHARED_REGION_SIZE_ARM;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
			break;
#else
		case CPU_TYPE_I386:
			base_address = SHARED_REGION_BASE_I386;
			size = SHARED_REGION_SIZE_I386;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC;
			size = SHARED_REGION_SIZE_PPC;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
			break;
#endif
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				 cputype));
			kfree(shared_region, sizeof (*shared_region));
			shared_region = NULL;
			goto done;
		}
	}

	/* create a memory entry structure and a Mach port handle */
	kr = mach_memory_entry_allocate(&mem_entry,
					&mem_entry_port);
	if (kr != KERN_SUCCESS) {
		kfree(shared_region, sizeof (*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: "
			 "couldn't allocate mem_entry\n"));
		goto done;
	}

#if defined(__arm__) || defined(__arm64__)
	{
		struct pmap *pmap_nested;

		pmap_nested = pmap_create(NULL, 0, is_64bit);
		if (pmap_nested != PMAP_NULL) {
			pmap_set_nested(pmap_nested);
			sub_map = vm_map_create(pmap_nested, 0, size, TRUE);
#if defined(__arm64__)
			if (is_64bit ||
			    page_shift_user32 == SIXTEENK_PAGE_SHIFT) {
				/* enforce 16KB alignment of VM map entries */
				vm_map_set_page_shift(sub_map,
						      SIXTEENK_PAGE_SHIFT);
			}
#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
			/* enforce 16KB alignment for watch targets with new ABI */
			vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT);
#endif /* __arm64__ */
		} else {
			sub_map = VM_MAP_NULL;
		}
	}
#else
	/* create a VM sub map and its pmap */
	sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit),
				0, size,
				TRUE);
#endif
	if (sub_map == VM_MAP_NULL) {
		ipc_port_release_send(mem_entry_port);
		kfree(shared_region, sizeof (*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: "
			 "couldn't allocate map\n"));
		goto done;
	}

	assert(!sub_map->disable_vmentry_reuse);
	sub_map->is_nested_map = TRUE;

	/* make the memory entry point to the VM sub map */
	mem_entry->is_sub_map = TRUE;
	mem_entry->backing.map = sub_map;
	mem_entry->size = size;
	mem_entry->protection = VM_PROT_ALL;

	/* make the shared region point at the memory entry */
	shared_region->sr_mem_entry = mem_entry_port;

	/* fill in the shared region's environment and settings */
	shared_region->sr_base_address = base_address;
	shared_region->sr_size = size;
	shared_region->sr_pmap_nesting_start = pmap_nesting_start;
	shared_region->sr_pmap_nesting_size = pmap_nesting_size;
	shared_region->sr_cpu_type = cputype;
	shared_region->sr_cpu_subtype = cpu_subtype;
	shared_region->sr_64bit = is_64bit;
	shared_region->sr_root_dir = root_dir;

	queue_init(&shared_region->sr_q);
	shared_region->sr_mapping_in_progress = FALSE;
	shared_region->sr_slide_in_progress = FALSE;
	shared_region->sr_persists = FALSE;
	shared_region->sr_slid = FALSE;
	shared_region->sr_timer_call = NULL;
	shared_region->sr_first_mapping = (mach_vm_offset_t) -1;

	/* grab a reference for the caller */
	shared_region->sr_ref_count = 1;

	/* And set up slide info */
	si = &shared_region->sr_slide_info;
	si->start = 0;
	si->end = 0;
	si->slide = 0;
	si->slide_object = NULL;
	si->slide_info_size = 0;
	si->slide_info_entry = NULL;

	/* Initialize UUID and other metadata */
	memset(&shared_region->sr_uuid, '\0', sizeof(shared_region->sr_uuid));
	shared_region->sr_uuid_copied = FALSE;
	shared_region->sr_images_count = 0;
	shared_region->sr_images = NULL;

done:
	if (shared_region) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,"
			 "base=0x%llx,size=0x%llx) <- "
			 "%p mem=(%p,%p) map=%p pmap=%p\n",
			 (void *)VM_KERNEL_ADDRPERM(root_dir),
			 cputype, cpu_subtype, is_64bit,
			 (long long)base_address,
			 (long long)size,
			 (void *)VM_KERNEL_ADDRPERM(shared_region),
			 (void *)VM_KERNEL_ADDRPERM(mem_entry_port),
			 (void *)VM_KERNEL_ADDRPERM(mem_entry),
			 (void *)VM_KERNEL_ADDRPERM(sub_map),
			 (void *)VM_KERNEL_ADDRPERM(sub_map->pmap)));
	} else {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,"
			 "base=0x%llx,size=0x%llx) <- NULL",
			 (void *)VM_KERNEL_ADDRPERM(root_dir),
			 cputype, cpu_subtype, is_64bit,
			 (long long)base_address,
			 (long long)size));
	}
	return shared_region;
}
/*
 * Destroy a now-unused shared region.
 * The shared region is no longer in the queue and can not be looked up.
 */
static void
vm_shared_region_destroy(
	vm_shared_region_t	shared_region)
{
	vm_named_entry_t	mem_entry;
	vm_map_t		map;
	vm_shared_region_slide_info_t si;

	SHARED_REGION_TRACE_INFO(
		("shared_region: -> destroy(%p) (root=%p,cpu=<%d,%d>,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir),
		 shared_region->sr_cpu_type,
		 shared_region->sr_cpu_subtype,
		 shared_region->sr_64bit));

	assert(shared_region->sr_ref_count == 0);
	assert(!shared_region->sr_persists);
	assert(!shared_region->sr_slid);

	mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
	assert(mem_entry->is_sub_map);
	assert(!mem_entry->internal);
	assert(!mem_entry->is_copy);
	map = mem_entry->backing.map;

	/*
	 * Clean up the pmap first. The virtual addresses that were
	 * entered in this possibly "nested" pmap may have different values
	 * than the VM map's min and max offsets, if the VM sub map was
	 * mapped at a non-zero offset in the processes' main VM maps, which
	 * is usually the case, so the clean-up we do in vm_map_destroy() would
	 * not be enough here.
	 */
	pmap_remove(map->pmap,
		    shared_region->sr_base_address,
		    (shared_region->sr_base_address +
		     shared_region->sr_size));

	/*
	 * Release our (one and only) handle on the memory entry.
	 * This will generate a no-senders notification, which will be processed
	 * by ipc_kobject_notify(), which will release the one and only
	 * reference on the memory entry and cause it to be destroyed, along
	 * with the VM sub map and its pmap.
	 */
	mach_memory_entry_port_release(shared_region->sr_mem_entry);
	shared_region->sr_mem_entry = IPC_PORT_NULL;

	if (shared_region->sr_timer_call) {
		thread_call_free(shared_region->sr_timer_call);
	}

	/*
	 * If slid, free those resources. We'll want this eventually,
	 * but can't handle it properly today.
	 */
	si = &shared_region->sr_slide_info;
	if (si->slide_info_entry) {
		kmem_free(kernel_map,
			  (vm_offset_t) si->slide_info_entry,
			  (vm_size_t) si->slide_info_size);
		vm_object_deallocate(si->slide_object);
	}

	/* release the shared region structure... */
	kfree(shared_region, sizeof (*shared_region));

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: destroy(%p) <-\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	shared_region = NULL;
}
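/*
 * Note: dyld retrieves the address computed below through the
 * shared_region_check_np() system call and uses it to validate the shared
 * cache it is running against (see "sr_first_mapping" in
 * vm_shared_region_map_file() below).
 */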
/*
 * Gets the address of the first (in time) mapping in the shared region.
 */
kern_return_t
vm_shared_region_start_address(
	vm_shared_region_t	shared_region,
	mach_vm_offset_t	*start_address)
{
	kern_return_t		kr;
	mach_vm_offset_t	sr_base_address;
	mach_vm_offset_t	sr_first_mapping;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> start_address(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);

	vm_shared_region_lock();

	/*
	 * Wait if there's another thread establishing a mapping
	 * in this shared region right when we're looking at it.
	 * We want a consistent view of the map...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		assert(shared_region->sr_ref_count > 1);
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
				       THREAD_UNINT);
	}
	assert(!shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);

	sr_base_address = shared_region->sr_base_address;
	sr_first_mapping = shared_region->sr_first_mapping;

	if (sr_first_mapping == (mach_vm_offset_t) -1) {
		/* shared region is empty */
		kr = KERN_INVALID_ADDRESS;
	} else {
		kr = KERN_SUCCESS;
		*start_address = sr_base_address + sr_first_mapping;
	}

	vm_shared_region_unlock();

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: start_address(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_base_address));

	return kr;
}
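/*
 * Note: vm_shared_region_undo_mappings() is the error-recovery path for
 * vm_shared_region_map_file() below: it deallocates whatever mappings were
 * already established so a failed shared-cache setup does not leave a
 * half-populated region behind.
 */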
void
vm_shared_region_undo_mappings(
	vm_map_t			sr_map,
	mach_vm_offset_t		sr_base_address,
	struct shared_file_mapping_np	*mappings,
	unsigned int			mappings_count)
{
	unsigned int		j = 0;
	vm_shared_region_t	shared_region = NULL;
	boolean_t		reset_shared_region_state = FALSE;

	shared_region = vm_shared_region_get(current_task());
	if (shared_region == NULL) {
		printf("Failed to undo mappings because of NULL shared region.\n");
		return;
	}

	if (sr_map == NULL) {
		ipc_port_t		sr_handle;
		vm_named_entry_t	sr_mem_entry;

		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);

		while (shared_region->sr_mapping_in_progress) {
			/* wait for our turn... */
			vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
					       THREAD_UNINT);
		}
		assert(!shared_region->sr_mapping_in_progress);
		assert(shared_region->sr_ref_count > 1);
		/* let others know we're working in this shared region */
		shared_region->sr_mapping_in_progress = TRUE;

		vm_shared_region_unlock();

		reset_shared_region_state = TRUE;

		/* no need to lock because this data is never modified... */
		sr_handle = shared_region->sr_mem_entry;
		sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
		sr_map = sr_mem_entry->backing.map;
		sr_base_address = shared_region->sr_base_address;
	}

	/*
	 * Undo the mappings we've established so far.
	 */
	for (j = 0; j < mappings_count; j++) {
		kern_return_t kr2;

		if (mappings[j].sfm_size == 0) {
			/*
			 * We didn't establish this
			 * mapping, so nothing to undo.
			 */
			continue;
		}
		SHARED_REGION_TRACE_INFO(
			("shared_region: mapping[%d]: "
			 "address:0x%016llx "
			 "size:0x%016llx "
			 "offset:0x%016llx "
			 "maxprot:0x%x prot:0x%x: "
			 "undoing...\n",
			 j,
			 (long long)mappings[j].sfm_address,
			 (long long)mappings[j].sfm_size,
			 (long long)mappings[j].sfm_file_offset,
			 mappings[j].sfm_max_prot,
			 mappings[j].sfm_init_prot));
		kr2 = mach_vm_deallocate(
			sr_map,
			(mappings[j].sfm_address -
			 sr_base_address),
			mappings[j].sfm_size);
		assert(kr2 == KERN_SUCCESS);
	}

	if (reset_shared_region_state) {
		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);
		assert(shared_region->sr_mapping_in_progress);
		/* we're done working on that shared region */
		shared_region->sr_mapping_in_progress = FALSE;
		thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
		vm_shared_region_unlock();
		reset_shared_region_state = FALSE;
	}

	vm_shared_region_deallocate(shared_region);
}
/*
 * Establish some mappings of a file in the shared region.
 * This is used by "dyld" via the shared_region_map_np() system call
 * to populate the shared region with the appropriate shared cache.
 *
 * One could also call it several times to incrementally load several
 * libraries, as long as they do not overlap.
 * It will return KERN_SUCCESS if the mappings were successfully established
 * or if they were already established identically by another process.
 */
kern_return_t
vm_shared_region_map_file(
	vm_shared_region_t		shared_region,
	unsigned int			mappings_count,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	void				*root_dir,
	uint32_t			slide,
	user_addr_t			slide_start,
	user_addr_t			slide_size)
{
	kern_return_t		kr;
	vm_object_t		file_object;
	ipc_port_t		sr_handle;
	vm_named_entry_t	sr_mem_entry;
	vm_map_t		sr_map;
	mach_vm_offset_t	sr_base_address;
	unsigned int		i;
	mach_port_t		map_port;
	vm_map_offset_t		target_address;
	vm_object_t		object;
	vm_object_size_t	obj_size;
	struct shared_file_mapping_np	*mapping_to_slide = NULL;
	mach_vm_offset_t	first_mapping = (mach_vm_offset_t) -1;
	mach_vm_offset_t	slid_mapping = (mach_vm_offset_t) -1;
	vm_map_offset_t		lowest_unnestable_addr = 0;
	vm_map_kernel_flags_t	vmk_flags;
	mach_vm_offset_t	sfm_min_address = ~0;
	mach_vm_offset_t	sfm_max_address = 0;
	struct _dyld_cache_header sr_cache_header;
#if __arm64__
	if ((shared_region->sr_64bit ||
	     page_shift_user32 == SIXTEENK_PAGE_SHIFT) &&
	    ((slide & SIXTEENK_PAGE_MASK) != 0)) {
		printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n",
		       __FUNCTION__, slide);
		kr = KERN_INVALID_ARGUMENT;
		goto done;
	}
#endif /* __arm64__ */
	kr = KERN_SUCCESS;

	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);

	if (shared_region->sr_root_dir != root_dir) {
		/*
		 * This shared region doesn't match the current root
		 * directory of this process. Deny the mapping to
		 * avoid tainting the shared region with something that
		 * doesn't quite belong into it.
		 */
		vm_shared_region_unlock();
		kr = KERN_PROTECTION_FAILURE;
		goto done;
	}

	/*
	 * Make sure we handle only one mapping at a time in a given
	 * shared region, to avoid race conditions. This should not
	 * happen frequently...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
				       THREAD_UNINT);
	}
	assert(!shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);
	/* let others know we're working in this shared region */
	shared_region->sr_mapping_in_progress = TRUE;

	vm_shared_region_unlock();

	/* no need to lock because this data is never modified... */
	sr_handle = shared_region->sr_mem_entry;
	sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
	sr_map = sr_mem_entry->backing.map;
	sr_base_address = shared_region->sr_base_address;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
		 (void *)VM_KERNEL_ADDRPERM(mappings),
		 (void *)VM_KERNEL_ADDRPERM(file_control), file_size));

	/* get the VM object associated with the file to be mapped */
	file_object = memory_object_control_to_vm_object(file_control);
	assert(file_object);
	/* establish the mappings */
	for (i = 0; i < mappings_count; i++) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: mapping[%d]: "
			 "address:0x%016llx size:0x%016llx offset:0x%016llx "
			 "maxprot:0x%x prot:0x%x\n",
			 i,
			 (long long)mappings[i].sfm_address,
			 (long long)mappings[i].sfm_size,
			 (long long)mappings[i].sfm_file_offset,
			 mappings[i].sfm_max_prot,
			 mappings[i].sfm_init_prot));

		if (mappings[i].sfm_address < sfm_min_address) {
			sfm_min_address = mappings[i].sfm_address;
		}

		if ((mappings[i].sfm_address + mappings[i].sfm_size) > sfm_max_address) {
			sfm_max_address = mappings[i].sfm_address + mappings[i].sfm_size;
		}

		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			/* zero-filled memory */
			map_port = MACH_PORT_NULL;
		} else {
			/* file-backed memory */
			__IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager);
		}

		if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) {
			/*
			 * This is the mapping that needs to be slid.
			 */
			if (mapping_to_slide != NULL) {
				SHARED_REGION_TRACE_INFO(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x "
					 "will not be slid as only one such mapping is allowed...\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot));
			} else {
				mapping_to_slide = &mappings[i];
			}
		}

		/* mapping's address is relative to the shared region base */
		target_address =
			mappings[i].sfm_address - sr_base_address;

		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		vmk_flags.vmkf_already = TRUE;
		/* establish that mapping, OK if it's "already" there */
		if (map_port == MACH_PORT_NULL) {
			/*
			 * We want to map some anonymous memory in a
			 * shared region.
			 * We have to create the VM object now, so that it
			 * can be mapped "copy-on-write".
			 */
			obj_size = vm_map_round_page(mappings[i].sfm_size,
						     VM_MAP_PAGE_MASK(sr_map));
			object = vm_object_allocate(obj_size);
			if (object == VM_OBJECT_NULL) {
				kr = KERN_RESOURCE_SHORTAGE;
			} else {
				kr = vm_map_enter(
					sr_map,
					&target_address,
					vm_map_round_page(mappings[i].sfm_size,
							  VM_MAP_PAGE_MASK(sr_map)),
					0,
					VM_FLAGS_FIXED,
					vmk_flags,
					VM_KERN_MEMORY_NONE,
					object,
					0,
					TRUE,
					mappings[i].sfm_init_prot & VM_PROT_ALL,
					mappings[i].sfm_max_prot & VM_PROT_ALL,
					VM_INHERIT_DEFAULT);
			}
		} else {
			object = VM_OBJECT_NULL; /* no anonymous memory here */
			kr = vm_map_enter_mem_object(
				sr_map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size,
						  VM_MAP_PAGE_MASK(sr_map)),
				0,
				VM_FLAGS_FIXED,
				vmk_flags,
				VM_KERN_MEMORY_NONE,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				mappings[i].sfm_init_prot & VM_PROT_ALL,
				mappings[i].sfm_max_prot & VM_PROT_ALL,
				VM_INHERIT_DEFAULT);
		}
		if (kr == KERN_SUCCESS) {
			/*
			 * Record the first (chronologically) successful
			 * mapping in this shared region.
			 * We're protected by "sr_mapping_in_progress" here,
			 * so no need to lock "shared_region".
			 */
			if (first_mapping == (mach_vm_offset_t) -1) {
				first_mapping = target_address;
			}

			if ((slid_mapping == (mach_vm_offset_t) -1) &&
			    (mapping_to_slide == &mappings[i])) {
				slid_mapping = target_address;
			}

			/*
			 * Record the lowest writable address in this
			 * sub map, to log any unexpected unnesting below
			 * that address (see log_unnest_badness()).
			 */
			if ((mappings[i].sfm_init_prot & VM_PROT_WRITE) &&
			    sr_map->is_nested_map &&
			    (lowest_unnestable_addr == 0 ||
			     (target_address < lowest_unnestable_addr))) {
				lowest_unnestable_addr = target_address;
			}
		} else {
			if (map_port == MACH_PORT_NULL) {
				/*
				 * Get rid of the VM object we just created
				 * but failed to map.
				 */
				vm_object_deallocate(object);
				object = VM_OBJECT_NULL;
			}
			if (kr == KERN_MEMORY_PRESENT) {
				/*
				 * This exact mapping was already there:
				 * that's fine.
				 */
				SHARED_REGION_TRACE_INFO(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x "
					 "already mapped...\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot));
				/*
				 * We didn't establish this mapping ourselves;
				 * let's reset its size, so that we do not
				 * attempt to undo it if an error occurs later.
				 */
				mappings[i].sfm_size = 0;
				kr = KERN_SUCCESS;
			} else {
				/* this mapping failed ! */
				SHARED_REGION_TRACE_ERROR(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x failed 0x%x\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot,
					 kr));

				vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i);
				break;
			}
		}
	}
	if (kr == KERN_SUCCESS &&
	    slide_size != 0 &&
	    mapping_to_slide != NULL) {
		kr = vm_shared_region_slide(slide,
					    mapping_to_slide->sfm_file_offset,
					    mapping_to_slide->sfm_size,
					    slide_start,
					    slide_size,
					    slid_mapping,
					    file_control);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: region_slide("
				 "slide:0x%x start:0x%016llx "
				 "size:0x%016llx) failed 0x%x\n",
				 slide,
				 (long long)slide_start,
				 (long long)slide_size,
				 kr));
			vm_shared_region_undo_mappings(sr_map,
						       sr_base_address,
						       mappings,
						       mappings_count);
		}
	}

	if (kr == KERN_SUCCESS) {
		/* adjust the map's "lowest_unnestable_start" */
		lowest_unnestable_addr &= ~(pmap_nesting_size_min-1);
		if (lowest_unnestable_addr !=
		    sr_map->lowest_unnestable_start) {
			vm_map_lock(sr_map);
			sr_map->lowest_unnestable_start =
				lowest_unnestable_addr;
			vm_map_unlock(sr_map);
		}
	}
	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);
	assert(shared_region->sr_mapping_in_progress);

	/* set "sr_first_mapping"; dyld uses it to validate the shared cache */
	if (kr == KERN_SUCCESS &&
	    shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
		shared_region->sr_first_mapping = first_mapping;
	}

	/*
	 * copy in the shared region UUID to the shared region structure.
	 * we do this indirectly by first copying in the shared cache header
	 * and then copying the UUID from there because we'll need to look
	 * at other content from the shared cache header.
	 */
	if (kr == KERN_SUCCESS && !shared_region->sr_uuid_copied) {
		int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping),
				   (char *)&sr_cache_header,
				   sizeof(sr_cache_header));
		if (error == 0) {
			memcpy(&shared_region->sr_uuid, &sr_cache_header.uuid, sizeof(shared_region->sr_uuid));
			shared_region->sr_uuid_copied = TRUE;
		} else {
#if DEVELOPMENT || DEBUG
			panic("shared_region: copyin shared_cache_header(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
			      "offset:0 size:0x%016llx) failed with %d\n",
			      (long long)shared_region->sr_base_address,
			      (long long)shared_region->sr_first_mapping,
			      (long long)sizeof(sr_cache_header),
			      error);
#endif /* DEVELOPMENT || DEBUG */
			shared_region->sr_uuid_copied = FALSE;
		}
	}
	/*
	 * If the shared cache is associated with the init task (and is therefore the system shared cache),
	 * check whether it is a custom built shared cache and copy in the shared cache layout accordingly.
	 */
	boolean_t is_init_task = (task_pid(current_task()) == 1);
	if (shared_region->sr_uuid_copied && is_init_task) {
		/* Copy in the shared cache layout if we're running with a locally built shared cache */
		if (sr_cache_header.locallyBuiltCache) {
			KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_START);
			size_t image_array_length = (sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info));
			struct _dyld_cache_image_text_info *sr_image_layout = kalloc(image_array_length);
			int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping +
					    sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length);
			if (error == 0) {
				shared_region->sr_images = kalloc(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64));
				for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) {
					memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid,
					       sizeof(shared_region->sr_images[index].imageUUID));
					shared_region->sr_images[index].imageLoadAddress = sr_image_layout[index].loadAddress;
				}

				assert(sr_cache_header.imagesTextCount < UINT32_MAX);
				shared_region->sr_images_count = (uint32_t) sr_cache_header.imagesTextCount;
			} else {
#if DEVELOPMENT || DEBUG
				panic("shared_region: copyin shared_cache_layout(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
				      "offset:0x%016llx size:0x%016llx) failed with %d\n",
				      (long long)shared_region->sr_base_address,
				      (long long)shared_region->sr_first_mapping,
				      (long long)sr_cache_header.imagesTextOffset,
				      (long long)image_array_length,
				      error);
#endif /* DEVELOPMENT || DEBUG */
			}

			KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count);
			kfree(sr_image_layout, image_array_length);
			sr_image_layout = NULL;
		}
		init_task_shared_region = shared_region;
	}
	if (kr == KERN_SUCCESS) {
		/*
		 * If we succeeded, we know the bounds of the shared region.
		 * Trim our pmaps to only cover this range (if applicable to
		 * this platform).
		 */
		pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_min_address, sfm_max_address - sfm_min_address);
	}

	/* we're done working on that shared region */
	shared_region->sr_mapping_in_progress = FALSE;
	thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
	vm_shared_region_unlock();

done:
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
		 (void *)VM_KERNEL_ADDRPERM(mappings),
		 (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr));

	return kr;
}
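/*
 * Illustrative sketch (not from this file) of the mapping descriptors that
 * dyld passes to shared_region_map_np() and that arrive here as the
 * "mappings" argument.  The field values are invented for the example; only
 * the struct shared_file_mapping_np field names are taken from the code
 * above.
 */
#if 0	/* example only, not compiled */
static void
shared_file_mapping_example(void)
{
	struct shared_file_mapping_np text_mapping = {
		.sfm_address = 0x7fff20000000ULL,	/* absolute address to map at */
		.sfm_size = 0x10000000ULL,		/* bytes to map */
		.sfm_file_offset = 0,			/* offset within the cache file */
		.sfm_max_prot = VM_PROT_READ | VM_PROT_EXECUTE,
		.sfm_init_prot = VM_PROT_READ | VM_PROT_EXECUTE,
	};
	(void)text_mapping;
}
#endif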
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 *
 * This also tries to trim the pmap for the shared region.
 */
vm_shared_region_t
vm_shared_region_trim_and_get(task_t task)
{
	vm_shared_region_t	shared_region;
	ipc_port_t		sr_handle;
	vm_named_entry_t	sr_mem_entry;
	vm_map_t		sr_map;

	/* Get the shared region and the map. */
	shared_region = vm_shared_region_get(task);
	if (shared_region == NULL) {
		return NULL;
	}

	sr_handle = shared_region->sr_mem_entry;
	sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
	sr_map = sr_mem_entry->backing.map;

	/* Trim the pmap if possible. */
	pmap_trim(task->map->pmap, sr_map->pmap, 0, 0, 0);

	return shared_region;
}
/*
 * Enter the appropriate shared region into "map" for "task".
 * This involves looking up the shared region (and possibly creating a new
 * one) for the desired environment, then mapping the VM sub map into the
 * task's VM "map", with the appropriate level of pmap-nesting.
 */
kern_return_t
vm_shared_region_enter(
	struct _vm_map		*map,
	struct task		*task,
	boolean_t		is_64bit,
	void			*fsroot,
	cpu_type_t		cpu,
	cpu_subtype_t		cpu_subtype)
{
	kern_return_t		kr;
	vm_shared_region_t	shared_region;
	vm_map_offset_t		sr_address, sr_offset, target_address;
	vm_map_size_t		sr_size, mapping_size;
	vm_map_offset_t		sr_pmap_nesting_start;
	vm_map_size_t		sr_pmap_nesting_size;
	ipc_port_t		sr_handle;
	vm_prot_t		cur_prot, max_prot;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> "
		 "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(fsroot),
		 cpu, cpu_subtype, is_64bit));

	/* lookup (create if needed) the shared region for this environment */
	shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit);
	if (shared_region == NULL) {
		/* this should not happen ! */
		SHARED_REGION_TRACE_ERROR(
			("shared_region: -> "
			 "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d): "
			 "lookup failed !\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot),
			 cpu, cpu_subtype, is_64bit));
		//panic("shared_region_enter: lookup failed\n");
		return KERN_FAILURE;
	}

	/* no need to lock since this data is never modified */
	sr_address = shared_region->sr_base_address;
	sr_size = shared_region->sr_size;
	sr_handle = shared_region->sr_mem_entry;
	sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start;
	sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size;

	cur_prot = VM_PROT_READ;
#ifdef __x86_64__
	/*
	 * XXX BINARY COMPATIBILITY
	 * java6 apparently needs to modify some code in the
	 * dyld shared cache and needs to be allowed to add
	 * write access...
	 */
	max_prot = VM_PROT_ALL;
#else /* __x86_64__ */
	max_prot = VM_PROT_READ;
#endif /* __x86_64__ */

	/*
	 * Start mapping the shared region's VM sub map into the task's VM map.
	 */
	sr_offset = 0;

	if (sr_pmap_nesting_start > sr_address) {
		/* we need to map a range without pmap-nesting first */
		target_address = sr_address;
		mapping_size = sr_pmap_nesting_start - sr_address;
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			VM_MAP_KERNEL_FLAGS_NONE,
			VM_KERN_MEMORY_NONE,
			sr_handle,
			sr_offset,
			TRUE,
			cur_prot,
			max_prot,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, cpu_subtype, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d,%d): "
			 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot),
			 cpu, cpu_subtype, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
		sr_offset += mapping_size;
		sr_size -= mapping_size;
	}

	/*
	 * We may need to map several pmap-nested portions, due to platform
	 * specific restrictions on pmap nesting.
	 * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias...
	 */
	for (;
	     sr_pmap_nesting_size > 0;
	     sr_offset += mapping_size,
	     sr_size -= mapping_size,
	     sr_pmap_nesting_size -= mapping_size) {
		target_address = sr_address + sr_offset;
		mapping_size = sr_pmap_nesting_size;
		if (mapping_size > pmap_nesting_size_max) {
			mapping_size = (vm_map_offset_t) pmap_nesting_size_max;
		}
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			VM_MAP_KERNEL_FLAGS_NONE,
			VM_MEMORY_SHARED_PMAP,
			sr_handle,
			sr_offset,
			TRUE,
			cur_prot,
			max_prot,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, cpu_subtype, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d,%d): "
			 "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot),
			 cpu, cpu_subtype, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
	}
	if (sr_size > 0) {
		/* and there's some left to be mapped without pmap-nesting */
		target_address = sr_address + sr_offset;
		mapping_size = sr_size;
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			VM_MAP_KERNEL_FLAGS_NONE,
			VM_KERN_MEMORY_NONE,
			sr_handle,
			sr_offset,
			TRUE,
			cur_prot,
			max_prot,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, cpu_subtype, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d,%d): "
			 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot),
			 cpu, cpu_subtype, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
		sr_offset += mapping_size;
		sr_size -= mapping_size;
	}
	assert(sr_size == 0);

done:
	if (kr == KERN_SUCCESS) {
		/* let the task use that shared region */
		vm_shared_region_set(task, shared_region);
	} else {
		/* drop our reference since we're not using it */
		vm_shared_region_deallocate(shared_region);
		vm_shared_region_set(task, NULL);
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: enter(%p,%p,%p,%d,%d,%d) <- 0x%x\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(fsroot),
		 cpu, cpu_subtype, is_64bit, kr));
	return kr;
}
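/*
 * The remainder of this file implements shared cache "sliding" (ASLR for
 * the shared region): vm_shared_region_sliding_valid() enforces the
 * one-slide policy, vm_shared_region_slide_mapping() installs the slide
 * info and the sliding pager, and the sanity-check routines validate the
 * dyld-supplied slide info before it is used.
 */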
#define SANE_SLIDE_INFO_SIZE		(2560*1024) /* Can be changed if needed */
struct vm_shared_region_slide_info	slide_info;

kern_return_t
vm_shared_region_sliding_valid(uint32_t slide)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_shared_region_t sr = vm_shared_region_get(current_task());

	/* No region yet? we're fine. */
	if (sr == NULL) {
		return kr;
	}

	if ((sr->sr_slid == TRUE) && slide) {
		if (slide != vm_shared_region_get_slide_info(sr)->slide) {
			printf("Only one shared region can be slid\n");
			kr = KERN_FAILURE;
		} else {
			/*
			 * Request for sliding when we've
			 * already done it with exactly the
			 * same slide value before.
			 * This isn't wrong technically but
			 * we don't want to slide again and
			 * so we return this value.
			 */
			kr = KERN_INVALID_ARGUMENT;
		}
	}
	vm_shared_region_deallocate(sr);
	return kr;
}
kern_return_t
vm_shared_region_slide_mapping(
	vm_shared_region_t	sr,
	mach_vm_size_t		slide_info_size,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	mach_vm_offset_t	slid_mapping,
	uint32_t		slide,
	memory_object_control_t	sr_file_control)
{
	kern_return_t		kr;
	vm_object_t		object;
	vm_shared_region_slide_info_t si;
	vm_offset_t		slide_info_entry;
	vm_map_entry_t		slid_entry, tmp_entry;
	struct vm_map_entry	tmp_entry_store;
	memory_object_t		sr_pager;
	vm_map_t		sr_map;
	int			vm_flags;
	vm_map_kernel_flags_t	vmk_flags;
	vm_map_offset_t		map_addr;

	tmp_entry = VM_MAP_ENTRY_NULL;
	sr_pager = MEMORY_OBJECT_NULL;
	object = VM_OBJECT_NULL;
	slide_info_entry = 0;

	assert(sr->sr_slide_in_progress);
	assert(!sr->sr_slid);

	si = vm_shared_region_get_slide_info(sr);
	assert(si->slide_object == VM_OBJECT_NULL);
	assert(si->slide_info_entry == NULL);

	if (sr_file_control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
		printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
		return KERN_FAILURE;
	}

	kr = kmem_alloc(kernel_map,
			(vm_offset_t *) &slide_info_entry,
			(vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	object = memory_object_control_to_vm_object(sr_file_control);
	if (object == VM_OBJECT_NULL || object->internal) {
		object = VM_OBJECT_NULL;
		kr = KERN_INVALID_ADDRESS;
		goto done;
	}

	vm_object_lock(object);
	vm_object_reference_locked(object);	/* for si->slide_object */
	object->object_is_shared_cache = TRUE;
	vm_object_unlock(object);

	si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry;
	si->slide_info_size = slide_info_size;

	assert(slid_mapping != (mach_vm_offset_t) -1);
	si->slid_address = slid_mapping + sr->sr_base_address;
	si->slide_object = object;
	si->start = start;
	si->end = si->start + size;
	si->slide = slide;

	/* find the shared region's map entry to slide */
	sr_map = vm_shared_region_vm_map(sr);
	vm_map_lock_read(sr_map);
	if (!vm_map_lookup_entry(sr_map,
				 slid_mapping,
				 &slid_entry)) {
		/* no mapping there */
		vm_map_unlock(sr_map);
		kr = KERN_INVALID_ARGUMENT;
		goto done;
	}
	/*
	 * We might want to clip the entry to cover only the portion that
	 * needs sliding (offsets si->start to si->end in the shared cache
	 * file at the bottom of the shadow chain).
	 * In practice, it seems to cover the entire DATA segment...
	 */
	tmp_entry_store = *slid_entry;
	tmp_entry = &tmp_entry_store;
	slid_entry = VM_MAP_ENTRY_NULL;
	/* extra ref to keep object alive while map is unlocked */
	vm_object_reference(VME_OBJECT(tmp_entry));
	vm_map_unlock_read(sr_map);

	/* create a "shared_region" sliding pager */
	sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry),
					     VME_OFFSET(tmp_entry),
					     si);
	if (sr_pager == NULL) {
		kr = KERN_RESOURCE_SHORTAGE;
		goto done;
	}

	/* map that pager over the portion of the mapping that needs sliding */
	vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_overwrite_immutable = TRUE;
	map_addr = tmp_entry->vme_start;
	kr = vm_map_enter_mem_object(sr_map,
				     &map_addr,
				     (tmp_entry->vme_end -
				      tmp_entry->vme_start),
				     (mach_vm_offset_t) 0,
				     vm_flags,
				     vmk_flags,
				     VM_KERN_MEMORY_NONE,
				     (ipc_port_t)(uintptr_t) sr_pager,
				     0,
				     TRUE,
				     tmp_entry->protection,
				     tmp_entry->max_protection,
				     tmp_entry->inheritance);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x\n", kr);
	assertf(map_addr == tmp_entry->vme_start,
		"map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n",
		(uint64_t)map_addr,
		(uint64_t) tmp_entry->vme_start,
		tmp_entry);

	/* success! */
	kr = KERN_SUCCESS;

done:
	if (sr_pager) {
		/*
		 * Release the sr_pager reference obtained by
		 * shared_region_pager_setup().
		 * The mapping (if it succeeded) is now holding a reference on
		 * the memory object.
		 */
		memory_object_deallocate(sr_pager);
		sr_pager = MEMORY_OBJECT_NULL;
	}
	if (tmp_entry) {
		/* release extra ref on tmp_entry's VM object */
		vm_object_deallocate(VME_OBJECT(tmp_entry));
		tmp_entry = VM_MAP_ENTRY_NULL;
	}

	if (kr != KERN_SUCCESS) {
		/* cleanup */
		if (slide_info_entry) {
			kmem_free(kernel_map, slide_info_entry, slide_info_size);
			slide_info_entry = 0;
		}
		if (si->slide_object) {
			vm_object_deallocate(si->slide_object);
			si->slide_object = VM_OBJECT_NULL;
		}
	}
	return kr;
}
void*
vm_shared_region_get_slide_info_entry(vm_shared_region_t sr) {
	return (void*)sr->sr_slide_info.slide_info_entry;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v1(vm_shared_region_slide_info_entry_v1_t s_info)
{
	uint32_t pageIndex = 0;
	uint16_t entryIndex = 0;
	uint16_t *toc = NULL;

	toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
	for (; pageIndex < s_info->toc_count; pageIndex++) {

		entryIndex = (uint16_t)(toc[pageIndex]);

		if (entryIndex >= s_info->entry_count) {
			printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
			return KERN_FAILURE;
		}
	}
	return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v2(vm_shared_region_slide_info_entry_v2_t s_info, mach_vm_size_t slide_info_size)
{
	if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
		return KERN_FAILURE;
	}

	/* Ensure that the slide info doesn't reference any data outside of its bounds. */

	uint32_t page_starts_count = s_info->page_starts_count;
	uint32_t page_extras_count = s_info->page_extras_count;
	mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
	if (num_trailing_entries < page_starts_count) {
		return KERN_FAILURE;
	}

	/* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
	mach_vm_size_t trailing_size = num_trailing_entries << 1;
	if (trailing_size >> 1 != num_trailing_entries) {
		return KERN_FAILURE;
	}

	mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
	if (required_size < sizeof(*s_info)) {
		return KERN_FAILURE;
	}

	if (required_size > slide_info_size) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
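
/*
 * Illustrative sketch (not part of the build): the overflow-checked bounds
 * computation used by the v2/v4 sanity checks above, pulled out into a
 * stand-alone helper.  The helper name is hypothetical; the kernel code
 * above open-codes these steps on mach_vm_size_t values.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
slide_info_bounds_ok(uint64_t trailing_entries, uint64_t header_size,
    uint64_t slide_info_size)
{
	/* scale by sizeof(uint16_t); if shifting back changes the value, the shift overflowed */
	uint64_t trailing_size = trailing_entries << 1;
	if ((trailing_size >> 1) != trailing_entries) {
		return false;
	}
	/* the addition must not wrap either */
	uint64_t required_size = header_size + trailing_size;
	if (required_size < header_size) {
		return false;
	}
	/* everything the slide info references must lie inside the supplied buffer */
	return required_size <= slide_info_size;
}
#endif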
static kern_return_t
vm_shared_region_slide_sanity_check_v3(vm_shared_region_slide_info_entry_v3_t s_info, mach_vm_size_t slide_info_size)
{
	if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
		printf("vm_shared_region_slide_sanity_check_v3: s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE 0x%llx != 0x%llx\n", (uint64_t)s_info->page_size, (uint64_t)PAGE_SIZE_FOR_SR_SLIDE);
		return KERN_FAILURE;
	}

	uint32_t page_starts_count = s_info->page_starts_count;
	mach_vm_size_t num_trailing_entries = page_starts_count;
	mach_vm_size_t trailing_size = num_trailing_entries << 1;
	mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
	if (required_size < sizeof(*s_info)) {
		printf("vm_shared_region_slide_sanity_check_v3: required_size != sizeof(*s_info) 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)sizeof(*s_info));
		return KERN_FAILURE;
	}

	if (required_size > slide_info_size) {
		printf("vm_shared_region_slide_sanity_check_v3: required_size != slide_info_size 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)slide_info_size);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v4(vm_shared_region_slide_info_entry_v4_t s_info, mach_vm_size_t slide_info_size)
{
	if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
		return KERN_FAILURE;
	}

	/* Ensure that the slide info doesn't reference any data outside of its bounds. */

	uint32_t page_starts_count = s_info->page_starts_count;
	uint32_t page_extras_count = s_info->page_extras_count;
	mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
	if (num_trailing_entries < page_starts_count) {
		return KERN_FAILURE;
	}

	/* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
	mach_vm_size_t trailing_size = num_trailing_entries << 1;
	if (trailing_size >> 1 != num_trailing_entries) {
		return KERN_FAILURE;
	}

	mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
	if (required_size < sizeof(*s_info)) {
		return KERN_FAILURE;
	}

	if (required_size > slide_info_size) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
kern_return_t
vm_shared_region_slide_sanity_check(vm_shared_region_t sr)
{
	vm_shared_region_slide_info_t si;
	vm_shared_region_slide_info_entry_t s_info;
	kern_return_t kr;

	si = vm_shared_region_get_slide_info(sr);
	s_info = si->slide_info_entry;

	kr = mach_vm_protect(kernel_map,
			     (mach_vm_offset_t)(vm_offset_t)s_info,
			     (mach_vm_size_t) si->slide_info_size,
			     TRUE, VM_PROT_READ);
	if (kr != KERN_SUCCESS) {
		panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr);
	}

	if (s_info->version == 1) {
		kr = vm_shared_region_slide_sanity_check_v1(&s_info->v1);
	} else if (s_info->version == 2) {
		kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, si->slide_info_size);
	} else if (s_info->version == 3) {
		kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, si->slide_info_size);
	} else if (s_info->version == 4) {
		kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, si->slide_info_size);
	} else {
		goto fail;
	}
	if (kr != KERN_SUCCESS) {
		goto fail;
	}

	return KERN_SUCCESS;
fail:
	if (si->slide_info_entry != NULL) {
		kmem_free(kernel_map,
			  (vm_offset_t) si->slide_info_entry,
			  (vm_size_t) si->slide_info_size);

		vm_object_deallocate(si->slide_object);
		si->slide_object = NULL;
		si->slide_info_entry = NULL;
		si->slide_info_size = 0;
	}
	return KERN_FAILURE;
}
static kern_return_t
vm_shared_region_slide_page_v1(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
	uint16_t *toc = NULL;
	slide_info_entry_toc_t bitmap = NULL;
	uint32_t i = 0, j = 0;
	uint8_t b = 0;
	uint32_t slide = si->slide;
	int is_64 = task_has_64Bit_addr(current_task());

	vm_shared_region_slide_info_entry_v1_t s_info = &si->slide_info_entry->v1;
	toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);

	if (pageIndex >= s_info->toc_count) {
		printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count);
	} else {
		uint16_t entryIndex = (uint16_t)(toc[pageIndex]);
		slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset);

		if (entryIndex >= s_info->entry_count) {
			printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count);
		} else {
			bitmap = &slide_info_entries[entryIndex];

			for (i = 0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) {
				b = bitmap->entry[i];
				if (b != 0) {
					for (j = 0; j < 8; ++j) {
						if (b & (1 << j)) {
							uint32_t *ptr_to_slide;
							uint32_t old_value;

							ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr)+(sizeof(uint32_t)*(i*8 +j)));
							old_value = *ptr_to_slide;
							*ptr_to_slide += slide;
							if (is_64 && *ptr_to_slide < old_value) {
								/*
								 * We just slid the low 32 bits of a 64-bit pointer
								 * and it looks like there should have been a carry-over
								 * to the upper 32 bits.
								 * The sliding failed...
								 */
								printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n",
								       i, j, b, slide, old_value, *ptr_to_slide);
								return KERN_FAILURE;
							}
						}
					}
				}
			}
		}
	}

	return KERN_SUCCESS;
}
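
/*
 * Illustrative sketch (not part of the build): the carry-over test used in
 * the v1 loop above, in isolation.  "low32_slide_carried" is a hypothetical
 * name; the kernel open-codes this test on *ptr_to_slide.  Example: an old
 * value of 0xFFFFF000 plus a slide of 0x2000 wraps to 0x00001000, so the
 * upper 32 bits of the 64-bit pointer would have needed a carry that the
 * v1 format cannot apply.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
low32_slide_carried(uint32_t old_value, uint32_t slide)
{
	uint32_t new_value = old_value + slide;	/* addition is mod 2^32 */
	return new_value < old_value;		/* wrap-around == carry out of bit 31 */
}
#endif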
static kern_return_t
rebase_chain_32(
	uint8_t *page_content,
	uint16_t start_offset,
	uint32_t slide_amount,
	vm_shared_region_slide_info_entry_v2_t s_info)
{
	const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

	const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
	const uint32_t value_mask = ~delta_mask;
	const uint32_t value_add = (uint32_t)(s_info->value_add);
	const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

	uint32_t page_offset = start_offset;
	uint32_t delta = 1;

	while (delta != 0 && page_offset <= last_page_offset) {
		uint8_t *loc;
		uint32_t value;

		loc = page_content + page_offset;
		memcpy(&value, loc, sizeof(value));
		delta = (value & delta_mask) >> delta_shift;
		value &= value_mask;

		if (value != 0) {
			value += value_add;
			value += slide_amount;
		}
		memcpy(loc, &value, sizeof(value));
		page_offset += delta;
	}

	/* If the offset went past the end of the page, then the slide data is invalid. */
	if (page_offset > last_page_offset) {
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
static kern_return_t
rebase_chain_64(
	uint8_t *page_content,
	uint16_t start_offset,
	uint32_t slide_amount,
	vm_shared_region_slide_info_entry_v2_t s_info)
{
	const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint64_t);

	const uint64_t delta_mask = s_info->delta_mask;
	const uint64_t value_mask = ~delta_mask;
	const uint64_t value_add = s_info->value_add;
	const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;

	uint32_t page_offset = start_offset;
	uint32_t delta = 1;

	while (delta != 0 && page_offset <= last_page_offset) {
		uint8_t *loc;
		uint64_t value;

		loc = page_content + page_offset;
		memcpy(&value, loc, sizeof(value));
		delta = (uint32_t)((value & delta_mask) >> delta_shift);
		value &= value_mask;

		if (value != 0) {
			value += value_add;
			value += slide_amount;
		}
		memcpy(loc, &value, sizeof(value));
		page_offset += delta;
	}

	if (page_offset + sizeof(uint32_t) == PAGE_SIZE_FOR_SR_SLIDE) {
		/* If a pointer straddling the page boundary needs to be adjusted, then
		 * add the slide to the lower half. The encoding guarantees that the upper
		 * half on the next page will need no masking.
		 *
		 * This assumes a little-endian machine and that the region being slid
		 * never crosses a 4 GB boundary. */

		uint8_t *loc = page_content + page_offset;
		uint32_t value;

		memcpy(&value, loc, sizeof(value));
		value += slide_amount;
		memcpy(loc, &value, sizeof(value));
	} else if (page_offset > last_page_offset) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
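
/*
 * Illustrative sketch (not part of the build): how one link of the v2 delta
 * chain is decoded by the loops above.  The delta field of each pointer
 * stores the distance to the next pointer on the page in 4-byte units;
 * subtracting 2 from the trailing-zero count of delta_mask folds that x4
 * scaling into the extraction.  The helper name is hypothetical.
 */
#if 0
#include <stdint.h>

static uint32_t
v2_next_pointer_byte_delta(uint64_t raw_value, uint64_t delta_mask)
{
	const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;
	/* byte offset from this pointer to the next one in the chain (0 == end) */
	return (uint32_t)((raw_value & delta_mask) >> delta_shift);
}
#endif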
static kern_return_t
rebase_chain(
	boolean_t is_64,
	uint32_t pageIndex,
	uint8_t *page_content,
	uint16_t start_offset,
	uint32_t slide_amount,
	vm_shared_region_slide_info_entry_v2_t s_info)
{
	kern_return_t kr;

	if (is_64) {
		kr = rebase_chain_64(page_content, start_offset, slide_amount, s_info);
	} else {
		kr = rebase_chain_32(page_content, start_offset, slide_amount, s_info);
	}

	if (kr != KERN_SUCCESS) {
		printf("vm_shared_region_slide_page() offset overflow: pageIndex=%u, start_offset=%u, slide_amount=%u\n",
		       pageIndex, start_offset, slide_amount);
	}
	return kr;
}
static kern_return_t
vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
	vm_shared_region_slide_info_entry_v2_t s_info = &si->slide_info_entry->v2;
	const uint32_t slide_amount = si->slide;

	/* The high bits of the delta_mask field are nonzero precisely when the shared
	 * cache is 64-bit. */
	const boolean_t is_64 = (s_info->delta_mask >> 32) != 0;

	const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
	const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

	uint8_t *page_content = (uint8_t *)vaddr;
	uint16_t page_entry;

	if (pageIndex >= s_info->page_starts_count) {
		printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
		       pageIndex, s_info->page_starts_count);
		return KERN_FAILURE;
	}
	page_entry = page_starts[pageIndex];

	if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
		return KERN_SUCCESS;
	}

	if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) {
		uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE;
		uint16_t info;

		do {
			uint16_t page_start_offset;
			kern_return_t kr;

			if (chain_index >= s_info->page_extras_count) {
				printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
				       chain_index, s_info->page_extras_count);
				return KERN_FAILURE;
			}
			info = page_extras[chain_index];
			page_start_offset = (info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;

			kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
			if (kr != KERN_SUCCESS) {
				return KERN_FAILURE;
			}

			chain_index++;
		} while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END));
	} else {
		const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
		kern_return_t kr;

		kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
		if (kr != KERN_SUCCESS) {
			return KERN_FAILURE;
		}
	}

	return KERN_SUCCESS;
}
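
/*
 * Illustrative sketch (not part of the build): how a v2 page_starts[] entry
 * is interpreted by the function above.  The helper name is hypothetical and
 * it only reports the first chain; when the _EXTRA bit is set, the real code
 * walks a run of page_extras[] entries terminated by
 * DYLD_CACHE_SLIDE_PAGE_ATTR_END.
 */
#if 0
static int
v2_first_chain_byte_offset(uint16_t page_entry, const uint16_t *page_extras)
{
	if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
		return -1;	/* nothing to slide on this page */
	}
	if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) {
		uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE;
		uint16_t info = page_extras[chain_index];	/* first entry of the run */
		return (info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
	}
	return page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
}
#endif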
static kern_return_t
vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vaddr, __unused mach_vm_offset_t uservaddr, uint32_t pageIndex)
{
	vm_shared_region_slide_info_entry_v3_t s_info = &si->slide_info_entry->v3;
	const uint32_t slide_amount = si->slide;

	uint8_t *page_content = (uint8_t *)vaddr;
	uint16_t page_entry;

	if (pageIndex >= s_info->page_starts_count) {
		printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
		       pageIndex, s_info->page_starts_count);
		return KERN_FAILURE;
	}
	page_entry = s_info->page_starts[pageIndex];

	if (page_entry == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE) {
		return KERN_SUCCESS;
	}

	uint8_t* rebaseLocation = page_content;
	uint64_t delta = page_entry;
	do {
		rebaseLocation += delta;
		uint64_t value;
		memcpy(&value, rebaseLocation, sizeof(value));
		delta = ( (value & 0x3FF8000000000000) >> 51) * sizeof(uint64_t);

		// A pointer is one of :
		// {
		//	 uint64_t pointerValue : 51;
		//	 uint64_t offsetToNextPointer : 11;
		//	 uint64_t isBind : 1 = 0;
		//	 uint64_t authenticated : 1 = 0;
		// }
		// {
		//	 uint32_t offsetFromSharedCacheBase;
		//	 uint16_t diversityData;
		//	 uint16_t hasAddressDiversity : 1;
		//	 uint16_t hasDKey : 1;
		//	 uint16_t hasBKey : 1;
		//	 uint16_t offsetToNextPointer : 11;
		//	 uint16_t isBind : 1;
		//	 uint16_t authenticated : 1 = 1;
		// }

		bool isBind = (value & (1ULL << 62)) != 0;
		if (isBind) {
			return KERN_FAILURE;
		}

		bool isAuthenticated = (value & (1ULL << 63)) != 0;

		if (isAuthenticated) {
			// The new value for a rebase is the low 32-bits of the threaded value plus the slide.
			value = (value & 0xFFFFFFFF) + slide_amount;
			// Add in the offset from the mach_header
			const uint64_t value_add = s_info->value_add;
			value += value_add;
		} else {
			// The new value for a rebase is the low 51-bits of the threaded value plus the slide.
			// Regular pointer which needs to fit in 51-bits of value.
			// C++ RTTI uses the top bit, so we'll allow the whole top-byte
			// and the bottom 43-bits to be fit in to 51-bits.
			uint64_t top8Bits = value & 0x0007F80000000000ULL;
			uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;
			uint64_t targetValue = ( top8Bits << 13 ) | bottom43Bits;
			value = targetValue + slide_amount;
		}

		memcpy(rebaseLocation, &value, sizeof(value));
	} while (delta != 0);

	return KERN_SUCCESS;
}
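
/*
 * Illustrative sketch (not part of the build): the two 64-bit pointer
 * encodings that the v3 walk above decodes by hand, written as bitfields.
 * The layout is transcribed from the comment inside the function; the type
 * and field names here are only for illustration.
 */
#if 0
#include <stdint.h>

union slide_v3_pointer_sketch {
	struct {
		uint64_t pointerValue              : 51;
		uint64_t offsetToNextPointer       : 11;	/* in 8-byte strides */
		uint64_t isBind                    : 1;		/* == 0 */
		uint64_t authenticated             : 1;		/* == 0 */
	} plain;
	struct {
		uint64_t offsetFromSharedCacheBase : 32;
		uint64_t diversityData             : 16;
		uint64_t hasAddressDiversity       : 1;
		uint64_t hasDKey                   : 1;
		uint64_t hasBKey                   : 1;
		uint64_t offsetToNextPointer       : 11;	/* in 8-byte strides */
		uint64_t isBind                    : 1;
		uint64_t authenticated             : 1;		/* == 1 */
	} auth;
	uint64_t raw;
};
#endif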
static kern_return_t
rebase_chainv4(
	uint8_t *page_content,
	uint16_t start_offset,
	uint32_t slide_amount,
	vm_shared_region_slide_info_entry_v4_t s_info)
{
	const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

	const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
	const uint32_t value_mask = ~delta_mask;
	const uint32_t value_add = (uint32_t)(s_info->value_add);
	const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

	uint32_t page_offset = start_offset;
	uint32_t delta = 1;

	while (delta != 0 && page_offset <= last_page_offset) {
		uint8_t *loc;
		uint32_t value;

		loc = page_content + page_offset;
		memcpy(&value, loc, sizeof(value));
		delta = (value & delta_mask) >> delta_shift;
		value &= value_mask;

		if ( (value & 0xFFFF8000) == 0 ) {
			// small positive non-pointer, use as-is
		} else if ( (value & 0x3FFF8000) == 0x3FFF8000 ) {
			// small negative non-pointer
			value |= 0xC0000000;
		} else {
			// pointer that needs rebasing
			value += value_add;
			value += slide_amount;
		}
		memcpy(loc, &value, sizeof(value));
		page_offset += delta;
	}

	/* If the offset went past the end of the page, then the slide data is invalid. */
	if (page_offset > last_page_offset) {
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
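
/*
 * Illustrative sketch (not part of the build): the three-way classification
 * the v4 rebase above applies to each masked value.  The helper name is
 * hypothetical; value_add and slide_amount mirror the locals in the loop.
 */
#if 0
#include <stdint.h>

static uint32_t
v4_rebase_value(uint32_t value, uint32_t value_add, uint32_t slide_amount)
{
	if ((value & 0xFFFF8000) == 0) {
		return value;			/* small positive non-pointer, used as-is */
	} else if ((value & 0x3FFF8000) == 0x3FFF8000) {
		return value | 0xC0000000;	/* small negative non-pointer, sign-extended */
	}
	return value + value_add + slide_amount;	/* pointer: add image base and slide */
}
#endif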
static kern_return_t
vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
	vm_shared_region_slide_info_entry_v4_t s_info = &si->slide_info_entry->v4;
	const uint32_t slide_amount = si->slide;

	const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
	const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

	uint8_t *page_content = (uint8_t *)vaddr;
	uint16_t page_entry;

	if (pageIndex >= s_info->page_starts_count) {
		printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
		       pageIndex, s_info->page_starts_count);
		return KERN_FAILURE;
	}
	page_entry = page_starts[pageIndex];

	if (page_entry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE) {
		return KERN_SUCCESS;
	}

	if (page_entry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) {
		uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE4_PAGE_INDEX;
		uint16_t info;

		do {
			uint16_t page_start_offset;
			kern_return_t kr;

			if (chain_index >= s_info->page_extras_count) {
				printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
				       chain_index, s_info->page_extras_count);
				return KERN_FAILURE;
			}
			info = page_extras[chain_index];
			page_start_offset = (info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;

			kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
			if (kr != KERN_SUCCESS) {
				return KERN_FAILURE;
			}

			chain_index++;
		} while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END));
	} else {
		const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
		kern_return_t kr;

		kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
		if (kr != KERN_SUCCESS) {
			return KERN_FAILURE;
		}
	}

	return KERN_SUCCESS;
}
kern_return_t
vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, mach_vm_offset_t uservaddr, uint32_t pageIndex)
{
	if (si->slide_info_entry->version == 1) {
		return vm_shared_region_slide_page_v1(si, vaddr, pageIndex);
	} else if (si->slide_info_entry->version == 2) {
		return vm_shared_region_slide_page_v2(si, vaddr, pageIndex);
	} else if (si->slide_info_entry->version == 3) {
		return vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex);
	} else if (si->slide_info_entry->version == 4) {
		return vm_shared_region_slide_page_v4(si, vaddr, pageIndex);
	} else {
		return KERN_FAILURE;
	}
}
/******************************************************************************/
/* Comm page support                                                          */
/******************************************************************************/

ipc_port_t commpage32_handle = IPC_PORT_NULL;
ipc_port_t commpage64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage32_entry = NULL;
vm_named_entry_t commpage64_entry = NULL;
vm_map_t commpage32_map = VM_MAP_NULL;
vm_map_t commpage64_map = VM_MAP_NULL;

ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage_text32_entry = NULL;
vm_named_entry_t commpage_text64_entry = NULL;
vm_map_t commpage_text32_map = VM_MAP_NULL;
vm_map_t commpage_text64_map = VM_MAP_NULL;

user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START;
user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START;
#if defined(__i386__) || defined(__x86_64__)
/*
 * Create a memory entry, VM submap and pmap for one commpage.
 */
static void
_vm_commpage_init(
	ipc_port_t	*handlep,
	vm_map_size_t	size)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_map_t		new_map;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> _init(0x%llx)\n",
		 (long long)size));

	kr = mach_memory_entry_allocate(&mem_entry,
					handlep);
	if (kr != KERN_SUCCESS) {
		panic("_vm_commpage_init: could not allocate mem_entry");
	}
	new_map = vm_map_create(pmap_create(NULL, 0, 0), 0, size, TRUE);
	if (new_map == VM_MAP_NULL) {
		panic("_vm_commpage_init: could not allocate VM map");
	}
	mem_entry->backing.map = new_map;
	mem_entry->internal = TRUE;
	mem_entry->is_sub_map = TRUE;
	mem_entry->offset = 0;
	mem_entry->protection = VM_PROT_ALL;
	mem_entry->size = size;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: _init(0x%llx) <- %p\n",
		 (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep)));
}
#endif
/*
 * Initialize the comm text pages at boot time
 */
extern u_int32_t random(void);
void
vm_commpage_text_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: ->init()\n"));
#if defined(__i386__) || defined(__x86_64__)
	/* create the 32-bit comm text page */
	unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restricting to 32bMAX-2PAGE */
	_vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject;
	commpage_text32_map = commpage_text32_entry->backing.map;
	commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm page */
	offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding up to a 2MB range */
	_vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject;
	commpage_text64_map = commpage_text64_entry->backing.map;
	commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);

	commpage_text_populate();
#elif defined(__arm64__) || defined(__arm__)
#else
#error Unknown architecture.
#endif /* __i386__ || __x86_64__ */
	/* populate the routines in here */
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: init() <-\n"));
}
/*
 * Initialize the comm pages at boot time.
 */
void
vm_commpage_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> init()\n"));

#if defined(__i386__) || defined(__x86_64__)
	/* create the 32-bit comm page */
	_vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
	commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
	commpage32_map = commpage32_entry->backing.map;

	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm page */
	_vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
	commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
	commpage64_map = commpage64_entry->backing.map;

#endif /* __i386__ || __x86_64__ */

	/* populate them according to this specific platform */
	commpage_populate();
	__commpage_setup = 1;
#if defined(__i386__) || defined(__x86_64__)
	if (__system_power_source == 0) {
		post_sys_powersource_internal(0, 1);
	}
#endif /* __i386__ || __x86_64__ */

	SHARED_REGION_TRACE_DEBUG(
		("commpage: init() <-\n"));
}
/*
 * Enter the appropriate comm page into the task's address space.
 * This is called at exec() time via vm_map_exec().
 */
kern_return_t
vm_commpage_enter(
	vm_map_t	map,
	task_t		task,
	boolean_t	is64bit)
{
#if defined(__arm__)
#pragma unused(is64bit)
	(void)task;
	(void)map;
	return KERN_SUCCESS;
#elif defined(__arm64__)
#pragma unused(is64bit)
	(void)task;
	pmap_insert_sharedpage(vm_map_pmap(map));
	return KERN_SUCCESS;
#else
	ipc_port_t		commpage_handle, commpage_text_handle;
	vm_map_offset_t		commpage_address, objc_address, commpage_text_address;
	vm_map_size_t		commpage_size, objc_size, commpage_text_size;
	int			vm_flags;
	vm_map_kernel_flags_t	vmk_flags;
	kern_return_t		kr;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> enter(%p,%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task)));

	commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
	/* the comm page is likely to be beyond the actual end of the VM map */
	vm_flags = VM_FLAGS_FIXED;
	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_beyond_max = TRUE;

	/* select the appropriate comm page for this task */
	assert(! (is64bit ^ vm_map_is_64bit(map)));
	if (is64bit) {
		commpage_handle = commpage64_handle;
		commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
		commpage_size = _COMM_PAGE64_AREA_LENGTH;
		objc_size = _COMM_PAGE64_OBJC_SIZE;
		objc_address = _COMM_PAGE64_OBJC_BASE;
		commpage_text_handle = commpage_text64_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text64_location;
	} else {
		commpage_handle = commpage32_handle;
		commpage_address =
			(vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
		commpage_size = _COMM_PAGE32_AREA_LENGTH;
		objc_size = _COMM_PAGE32_OBJC_SIZE;
		objc_address = _COMM_PAGE32_OBJC_BASE;
		commpage_text_handle = commpage_text32_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text32_location;
	}

	vm_tag_t tag = VM_KERN_MEMORY_NONE;
	if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
	    (commpage_size & (pmap_nesting_size_min - 1)) == 0) {
		/* the commpage is properly aligned or sized for pmap-nesting */
		tag = VM_MEMORY_SHARED_PMAP;
	}
	/* map the comm page in the task's address space */
	assert(commpage_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_address,
		commpage_size,
		0,
		vm_flags,
		vmk_flags,
		tag,
		commpage_handle,
		0,
		FALSE,
		VM_PROT_READ,
		VM_PROT_READ,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage: enter(%p,0x%llx,0x%llx) "
			 "commpage %p mapping failed 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (long long)commpage_address,
			 (long long)commpage_size,
			 (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr));
	}

	/* map the comm text page in the task's address space */
	assert(commpage_text_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_text_address,
		commpage_text_size,
		0,
		vm_flags,
		vmk_flags,
		tag,
		commpage_text_handle,
		0,
		FALSE,
		VM_PROT_READ|VM_PROT_EXECUTE,
		VM_PROT_READ|VM_PROT_EXECUTE,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage text: enter(%p,0x%llx,0x%llx) "
			 "commpage text %p mapping failed 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (long long)commpage_text_address,
			 (long long)commpage_text_size,
			 (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr));
	}

	/*
	 * Since we're here, we also pre-allocate some virtual space for the
	 * Objective-C run-time, if needed...
	 */
	if (objc_size != 0) {
		kr = vm_map_enter_mem_object(
			map,
			&objc_address,
			objc_size,
			0,
			VM_FLAGS_FIXED,
			vmk_flags,
			tag,
			IPC_PORT_NULL,
			0,
			FALSE,
			VM_PROT_ALL,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("commpage: enter(%p,0x%llx,0x%llx) "
				 "objc mapping failed 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (long long)objc_address,
				 (long long)objc_size, kr));
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("commpage: enter(%p,%p) <- 0x%x\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task), kr));

	return kr;
#endif
}
kern_return_t
vm_shared_region_slide(uint32_t slide,
	mach_vm_offset_t	entry_start_address,
	mach_vm_size_t		entry_size,
	mach_vm_offset_t	slide_start,
	mach_vm_size_t		slide_size,
	mach_vm_offset_t	slid_mapping,
	memory_object_control_t	sr_file_control)
{
	void *slide_info_entry = NULL;
	int error;
	vm_shared_region_t sr;

	SHARED_REGION_TRACE_DEBUG(
		("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
		 slide, entry_start_address, entry_size, slide_start, slide_size));

	sr = vm_shared_region_get(current_task());
	if (sr == NULL) {
		printf("%s: no shared region?\n", __FUNCTION__);
		SHARED_REGION_TRACE_DEBUG(
			("vm_shared_region_slide: <- %d (no shared region)\n",
			 KERN_FAILURE));
		return KERN_FAILURE;
	}

	/*
	 * Protect from concurrent access.
	 */
	vm_shared_region_lock();
	while (sr->sr_slide_in_progress) {
		vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
	}
	if (sr->sr_slid
#ifndef CONFIG_EMBEDDED
	    || shared_region_completed_slide
#endif
	    ) {
		vm_shared_region_unlock();

		vm_shared_region_deallocate(sr);
		printf("%s: shared region already slid?\n", __FUNCTION__);
		SHARED_REGION_TRACE_DEBUG(
			("vm_shared_region_slide: <- %d (already slid)\n",
			 KERN_FAILURE));
		return KERN_FAILURE;
	}

	sr->sr_slide_in_progress = TRUE;
	vm_shared_region_unlock();

	error = vm_shared_region_slide_mapping(sr,
					       slide_size,
					       entry_start_address,
					       entry_size,
					       slid_mapping,
					       slide,
					       sr_file_control);
	if (error) {
		printf("slide_info initialization failed with kr=%d\n", error);
		goto done;
	}

	slide_info_entry = vm_shared_region_get_slide_info_entry(sr);
	if (slide_info_entry == NULL) {
		error = KERN_FAILURE;
	} else {
		error = copyin((user_addr_t)slide_start,
			       slide_info_entry,
			       (vm_size_t)slide_size);
		if (error) {
			error = KERN_INVALID_ADDRESS;
		}
	}
	if (error) {
		goto done;
	}

	if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) {
		error = KERN_INVALID_ARGUMENT;
		printf("Sanity Check failed for slide_info\n");
	} else {
		printf("Successfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n",
		       (void*)(uintptr_t)entry_start_address,
		       (unsigned long)entry_size,
		       (unsigned long)slide_size);
	}
done:
	vm_shared_region_lock();

	assert(sr->sr_slide_in_progress);
	assert(sr->sr_slid == FALSE);
	sr->sr_slide_in_progress = FALSE;
	thread_wakeup(&sr->sr_slide_in_progress);

	if (error == KERN_SUCCESS) {
		sr->sr_slid = TRUE;

		/*
		 * We don't know how to tear down a slid shared region today, because
		 * we would have to invalidate all the pages that have been slid
		 * atomically with respect to anyone mapping the shared region afresh.
		 * Therefore, take a dangling reference to prevent teardown.
		 */
		sr->sr_ref_count++;
#ifndef CONFIG_EMBEDDED
		shared_region_completed_slide = TRUE;
#endif
	}
	vm_shared_region_unlock();

	vm_shared_region_deallocate(sr);

	SHARED_REGION_TRACE_DEBUG(
		("vm_shared_region_slide: <- %d\n",
		 error));

	return error;
}
/*
 * This is called from powermanagement code to let kernel know the current source of power.
 * 0 if it is external source (connected to power)
 * 1 if it is internal power source, i.e. battery
 */
void
#if defined(__i386__) || defined(__x86_64__)
post_sys_powersource(int i)
#else
post_sys_powersource(__unused int i)
#endif
{
#if defined(__i386__) || defined(__x86_64__)
	post_sys_powersource_internal(i, 0);
#endif /* __i386__ || __x86_64__ */
}


#if defined(__i386__) || defined(__x86_64__)
static void
post_sys_powersource_internal(int i, int internal)
{
	if (internal == 0)
		__system_power_source = i;

	if (__commpage_setup != 0) {
		if (__system_power_source != 0)
			commpage_set_spin_count(0);
		else
			commpage_set_spin_count(MP_SPIN_TRIES);
	}
}
#endif /* __i386__ || __x86_64__ */