/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Shared region (... and comm page)
 *
 * This file handles the VM shared region and comm page.
 *
 *
 * A shared region is a submap that contains the most common system shared
 * libraries for a given environment.
 * An environment is defined by (cpu-type, 64-bitness, root directory).
 *
 * The point of a shared region is to reduce the setup overhead when exec'ing
 * a new process.
 * A shared region uses a shared VM submap that gets mapped automatically
 * at exec() time (see vm_map_exec()). The first process of a given
 * environment sets up the shared region and all further processes in that
 * environment can re-use that shared region without having to re-create
 * the same mappings in their VM map. All they need is contained in the shared
 * region.
 * It can also share a pmap (mostly for read-only parts but also for the
 * initial version of some writable parts), which gets "nested" into the
 * process's pmap. This reduces the number of soft faults: once one process
 * brings in a page in the shared region, all the other processes can access
 * it without having to enter it in their own pmap.
 *
 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
 * to map the appropriate shared region in the process's address space.
 * We look up the appropriate shared region for the process's environment.
 * If we can't find one, we create a new (empty) one and add it to the list.
 * Otherwise, we just take an extra reference on the shared region we found.
 *
 * The "dyld" runtime (mapped into the process's address space at exec() time)
 * will then use the shared_region_check_np() and shared_region_map_np()
 * system calls to validate and/or populate the shared region with the
 * appropriate dyld_shared_cache file.
 *
 * The shared region is inherited on fork() and the child simply takes an
 * extra reference on its parent's shared region.
 *
 * When the task terminates, we release a reference on its shared region.
 * When the last reference is released, we destroy the shared region.
 *
 * After a chroot(), the calling process keeps using its original shared region,
 * since that's what was mapped when it was started. But its children
 * will use a different shared region, because they need to use the shared
 * cache that's relative to the new root directory.
 */
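/*
 * Illustrative sketch (not from this file): from userspace, dyld's side of
 * this protocol boils down to two system calls. The exact prototype of
 * shared_region_check_np() shown below is assumed for illustration only.
 *
 *     uint64_t cache_base = 0;
 *     if (shared_region_check_np(&cache_base) == 0) {
 *         // some earlier process already populated this environment's
 *         // shared region: the dyld_shared_cache is mapped at cache_base
 *     } else {
 *         // first process in this environment: open the shared cache file
 *         // and describe its segments via shared_region_map_np()
 *     }
 */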
/*
 * A "comm page" is an area of memory that is populated by the kernel with
 * the appropriate platform-specific version of some commonly used code.
 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
 * for the native cpu-type. No need to overly optimize translated code
 * for hardware that is not really there !
 *
 * The comm pages are created and populated at boot time.
 *
 * The appropriate comm page is mapped into a process's address space
 * at exec() time, in vm_map_exec().
 * It is then inherited on fork().
 *
 * The comm page is shared between the kernel and all applications of
 * a given platform. Only the kernel can modify it.
 *
 * Applications just branch to fixed addresses in the comm page and find
 * the right version of the code for the platform. There is also some
 * data provided and updated by the kernel for processes to retrieve easily
 * without having to do a system call.
 */
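/*
 * Illustrative sketch (not from this file), assuming the _COMM_PAGE_NCPUS
 * symbol from <machine/cpu_capabilities.h>: userspace reads comm page data
 * directly from its fixed address, with no system call.
 *
 *     #include <machine/cpu_capabilities.h>
 *
 *     static inline uint8_t
 *     ncpus_from_commpage(void)
 *     {
 *         return *(volatile uint8_t *)_COMM_PAGE_NCPUS;
 *     }
 */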
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>

#include <vm/vm_protos.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data_internal.h>
#endif /* __arm__ || __arm64__ */

/*
 * the following codes are used in the subclass
 * of the DBG_MACH_SHAREDREGION class
 */
#define PROCESS_SHARED_CACHE_LAYOUT 0x00

#if defined(HAS_APPLE_PAC)
#endif /* HAS_APPLE_PAC */
/* "dyld" uses this to figure out what the kernel supports */
int shared_region_version = 3;

/* trace level, output is sent to the system log file */
int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;

/* should local (non-chroot) shared regions persist when no task uses them ? */
int shared_region_persistence = 0;      /* no by default */

/* delay before reclaiming an unused shared region */
int shared_region_destroy_delay = 120;  /* in seconds */

struct vm_shared_region *init_task_shared_region = NULL;

#ifndef CONFIG_EMBEDDED
/*
 * Only one cache gets to slide on Desktop, since we can't
 * tear down slide info properly today and the desktop actually
 * produces lots of shared caches.
 */
boolean_t shared_region_completed_slide = FALSE;
#endif /* !CONFIG_EMBEDDED */

/* this lock protects all the shared region data structures */
lck_grp_t *vm_shared_region_lck_grp;
lck_mtx_t  vm_shared_region_lock;

#define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
#define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
#define vm_shared_region_sleep(event, interruptible)    \
    lck_mtx_sleep(&vm_shared_region_lock,               \
        LCK_SLEEP_DEFAULT,                              \
        (event_t) (event),                              \
        (interruptible))

/* the list of currently available shared regions (one per environment) */
queue_head_t vm_shared_region_queue;

static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
static vm_shared_region_t vm_shared_region_create(
    void                    *root_dir,
    cpu_type_t              cputype,
    cpu_subtype_t           cpu_subtype,
    boolean_t               is_64bit);
static void vm_shared_region_destroy(vm_shared_region_t shared_region);

static void vm_shared_region_timeout(thread_call_param_t param0,
    thread_call_param_t param1);
kern_return_t vm_shared_region_slide_mapping(
    vm_shared_region_t      sr,
    mach_vm_size_t          slide_info_size,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    mach_vm_offset_t        slid_mapping,
    uint32_t                slide,
    memory_object_control_t); /* forward */

static int __commpage_setup = 0;
static int __system_power_source = 1;   /* init to external power source */
static void post_sys_powersource_internal(int i, int internal);
/*
 * Initialize the module...
 */
void
vm_shared_region_init(void)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> init\n"));

    vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region",
        LCK_GRP_ATTR_NULL);
    lck_mtx_init(&vm_shared_region_lock,
        vm_shared_region_lck_grp,
        LCK_ATTR_NULL);

    queue_init(&vm_shared_region_queue);

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: <- init\n"));
}
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 */
vm_shared_region_t
vm_shared_region_get(
    task_t                  task)
{
    vm_shared_region_t      shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> get(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(task)));

    vm_shared_region_lock();
    shared_region = task->shared_region;
    if (shared_region) {
        assert(shared_region->sr_ref_count > 0);
        vm_shared_region_reference_locked(shared_region);
    }
    vm_shared_region_unlock();

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: get(%p) <- %p\n",
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(shared_region)));

    return shared_region;
}

/*
 * Get the base address of the shared region.
 * That's the address at which it needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_offset_t
vm_shared_region_base_address(
    vm_shared_region_t      shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> base_address(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: base_address(%p) <- 0x%llx\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        (long long)shared_region->sr_base_address));
    return shared_region->sr_base_address;
}

/*
 * Get the size of the shared region.
 * That's the size that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_size_t
vm_shared_region_size(
    vm_shared_region_t      shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> size(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: size(%p) <- 0x%llx\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        (long long)shared_region->sr_size));
    return shared_region->sr_size;
}

/*
 * Get the memory entry of the shared region.
 * That's the "memory object" that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
ipc_port_t
vm_shared_region_mem_entry(
    vm_shared_region_t      shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> mem_entry(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: mem_entry(%p) <- %p\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        (void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry)));
    return shared_region->sr_mem_entry;
}

vm_map_t
vm_shared_region_vm_map(
    vm_shared_region_t      shared_region)
{
    ipc_port_t              sr_handle;
    vm_named_entry_t        sr_mem_entry;
    vm_map_t                sr_map;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> vm_map(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);

    sr_handle = shared_region->sr_mem_entry;
    sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
    sr_map = sr_mem_entry->backing.map;
    assert(sr_mem_entry->is_sub_map);

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: vm_map(%p) <- %p\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        (void *)VM_KERNEL_ADDRPERM(sr_map)));
    return sr_map;
}

uint32_t
vm_shared_region_get_slide(
    vm_shared_region_t      shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> vm_shared_region_get_slide(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: vm_shared_region_get_slide(%p) <- %u\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        shared_region->sr_slide_info.slide));

    /* 0 if we haven't slid */
    assert(shared_region->sr_slide_info.slide_object != NULL ||
        shared_region->sr_slide_info.slide == 0);

    return shared_region->sr_slide_info.slide;
}

vm_shared_region_slide_info_t
vm_shared_region_get_slide_info(
    vm_shared_region_t      shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> vm_shared_region_get_slide_info(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info)));
    return &shared_region->sr_slide_info;
}
/*
 * Set the shared region the process should use.
 * A NULL new shared region means that we just want to release the old
 * shared region.
 * The caller should already have an extra reference on the new shared region
 * (if any). We release a reference on the old shared region (if any).
 */
void
vm_shared_region_set(
    task_t                  task,
    vm_shared_region_t      new_shared_region)
{
    vm_shared_region_t      old_shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> set(%p, %p)\n",
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(new_shared_region)));

    vm_shared_region_lock();

    old_shared_region = task->shared_region;
    if (new_shared_region) {
        assert(new_shared_region->sr_ref_count > 0);
    }

    task->shared_region = new_shared_region;

    vm_shared_region_unlock();

    if (old_shared_region) {
        assert(old_shared_region->sr_ref_count > 0);
        vm_shared_region_deallocate(old_shared_region);
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: set(%p) <- old=%p new=%p\n",
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(old_shared_region),
        (void *)VM_KERNEL_ADDRPERM(new_shared_region)));
}
/*
 * Lookup up the shared region for the desired environment.
 * If none is found, create a new (empty) one.
 * Grab an extra reference on the returned shared region, to make sure
 * it doesn't get destroyed before the caller is done with it. The caller
 * is responsible for consuming that extra reference if necessary.
 */
vm_shared_region_t
vm_shared_region_lookup(
    void            *root_dir,
    cpu_type_t      cputype,
    cpu_subtype_t   cpu_subtype,
    boolean_t       is_64bit)
{
    vm_shared_region_t      shared_region;
    vm_shared_region_t      new_shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> lookup(root=%p,cpu=<%d,%d>,64bit=%d)\n",
        (void *)VM_KERNEL_ADDRPERM(root_dir),
        cputype, cpu_subtype, is_64bit));

    shared_region = NULL;
    new_shared_region = NULL;

    vm_shared_region_lock();
    for (;;) {
        queue_iterate(&vm_shared_region_queue,
            shared_region,
            vm_shared_region_t,
            sr_q) {
            assert(shared_region->sr_ref_count > 0);
            if (shared_region->sr_cpu_type == cputype &&
                shared_region->sr_cpu_subtype == cpu_subtype &&
                shared_region->sr_root_dir == root_dir &&
                shared_region->sr_64bit == is_64bit) {
                /* found a match ! */
                vm_shared_region_reference_locked(shared_region);
                goto done;
            }
        }
        if (new_shared_region == NULL) {
            /* no match: create a new one */
            vm_shared_region_unlock();
            new_shared_region = vm_shared_region_create(root_dir,
                cputype,
                cpu_subtype,
                is_64bit);
            /* do the lookup again, in case we lost a race */
            vm_shared_region_lock();
        } else {
            /* still no match: use our new one */
            shared_region = new_shared_region;
            new_shared_region = NULL;
            queue_enter(&vm_shared_region_queue,
                shared_region,
                vm_shared_region_t,
                sr_q);
            break;
        }
    }

done:
    vm_shared_region_unlock();

    if (new_shared_region) {
        /*
         * We lost a race with someone else to create a new shared
         * region for that environment. Get rid of our unused one.
         */
        assert(new_shared_region->sr_ref_count == 1);
        new_shared_region->sr_ref_count--;
        vm_shared_region_destroy(new_shared_region);
        new_shared_region = NULL;
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: lookup(root=%p,cpu=<%d,%d>,64bit=%d) <- %p\n",
        (void *)VM_KERNEL_ADDRPERM(root_dir),
        cputype, cpu_subtype, is_64bit,
        (void *)VM_KERNEL_ADDRPERM(shared_region)));

    assert(shared_region->sr_ref_count > 0);
    return shared_region;
}
/*
 * Take an extra reference on a shared region.
 * The vm_shared_region_lock should already be held by the caller.
 */
static void
vm_shared_region_reference_locked(
    vm_shared_region_t      shared_region)
{
    LCK_MTX_ASSERT(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> reference_locked(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 0);
    shared_region->sr_ref_count++;

    if (shared_region->sr_timer_call != NULL) {
        boolean_t cancelled;

        /* cancel and free any pending timeout */
        cancelled = thread_call_cancel(shared_region->sr_timer_call);
        if (cancelled) {
            thread_call_free(shared_region->sr_timer_call);
            shared_region->sr_timer_call = NULL;
            /* release the reference held by the cancelled timer */
            shared_region->sr_ref_count--;
        } else {
            /* the timer will drop the reference and free itself */
        }
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: reference_locked(%p) <- %d\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        shared_region->sr_ref_count));
}
/*
 * Release a reference on the shared region.
 * Destroy it if there are no references left.
 */
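/*
 * Illustrative sketch of the reference-counting discipline, using only
 * functions defined in this file (the caller shown here is hypothetical):
 *
 *     vm_shared_region_t sr;
 *
 *     sr = vm_shared_region_get(task);            // take an extra reference
 *     if (sr != NULL) {
 *         mach_vm_offset_t base = vm_shared_region_base_address(sr);
 *         // ... use "base" while holding the reference ...
 *         vm_shared_region_deallocate(sr);        // consume that reference
 *     }
 */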
void
vm_shared_region_deallocate(
    vm_shared_region_t      shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> deallocate(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));

    vm_shared_region_lock();

    assert(shared_region->sr_ref_count > 0);

    if (shared_region->sr_root_dir == NULL) {
        /*
         * Local (i.e. based on the boot volume) shared regions
         * can persist or not based on the "shared_region_persistence"
         * setting.
         * Make sure that this one complies.
         *
         * See comments in vm_shared_region_slide() for notes about
         * shared regions we have slid (which are not torn down currently).
         */
        if (shared_region_persistence &&
            !shared_region->sr_persists) {
            /* make this one persistent */
            shared_region->sr_ref_count++;
            shared_region->sr_persists = TRUE;
        } else if (!shared_region_persistence &&
            shared_region->sr_persists) {
            /* make this one no longer persistent */
            assert(shared_region->sr_ref_count > 1);
            shared_region->sr_ref_count--;
            shared_region->sr_persists = FALSE;
        }
    }

    assert(shared_region->sr_ref_count > 0);
    shared_region->sr_ref_count--;
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: deallocate(%p): ref now %d\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        shared_region->sr_ref_count));

    if (shared_region->sr_ref_count == 0) {
        uint64_t deadline;

        assert(!shared_region->sr_slid);

        if (shared_region->sr_timer_call == NULL) {
            /* hold one reference for the timer */
            assert(!shared_region->sr_mapping_in_progress);
            shared_region->sr_ref_count++;

            /* set up the timer */
            shared_region->sr_timer_call = thread_call_allocate(
                (thread_call_func_t) vm_shared_region_timeout,
                (thread_call_param_t) shared_region);

            /* schedule the timer */
            clock_interval_to_deadline(shared_region_destroy_delay,
                1000 * 1000 * 1000, /* nanoseconds */
                &deadline);
            thread_call_enter_delayed(shared_region->sr_timer_call,
                deadline);

            SHARED_REGION_TRACE_DEBUG(
                ("shared_region: deallocate(%p): armed timer\n",
                (void *)VM_KERNEL_ADDRPERM(shared_region)));

            vm_shared_region_unlock();
        } else {
            /* timer expired: let go of this shared region */

            /*
             * We can't properly handle teardown of a slid object today.
             */
            assert(!shared_region->sr_slid);

            /*
             * Remove it from the queue first, so no one can find
             * it...
             */
            queue_remove(&vm_shared_region_queue,
                shared_region,
                vm_shared_region_t,
                sr_q);
            vm_shared_region_unlock();

            /* ... and destroy it */
            vm_shared_region_destroy(shared_region);
            shared_region = NULL;
        }
    } else {
        vm_shared_region_unlock();
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: deallocate(%p) <-\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
}
static void
vm_shared_region_timeout(
    thread_call_param_t             param0,
    __unused thread_call_param_t    param1)
{
    vm_shared_region_t      shared_region;

    shared_region = (vm_shared_region_t) param0;

    vm_shared_region_deallocate(shared_region);
}
/*
 * Create a new (empty) shared region for a new environment.
 */
static vm_shared_region_t
vm_shared_region_create(
    void                    *root_dir,
    cpu_type_t              cputype,
    cpu_subtype_t           cpu_subtype,
    boolean_t               is_64bit)
{
    kern_return_t           kr;
    vm_named_entry_t        mem_entry;
    ipc_port_t              mem_entry_port;
    vm_shared_region_t      shared_region;
    vm_shared_region_slide_info_t si;
    vm_map_t                sub_map;
    mach_vm_offset_t        base_address, pmap_nesting_start;
    mach_vm_size_t          size, pmap_nesting_size;

    SHARED_REGION_TRACE_INFO(
        ("shared_region: -> create(root=%p,cpu=<%d,%d>,64bit=%d)\n",
        (void *)VM_KERNEL_ADDRPERM(root_dir),
        cputype, cpu_subtype, is_64bit));

    base_address = 0;
    size = 0;
    mem_entry = NULL;
    mem_entry_port = IPC_PORT_NULL;
    sub_map = VM_MAP_NULL;

    /* create a new shared region structure... */
    shared_region = kalloc(sizeof(*shared_region));
    if (shared_region == NULL) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: create: couldn't allocate\n"));
        goto done;
    }

    /* figure out the correct settings for the desired environment */
    if (is_64bit) {
        switch (cputype) {
#if defined(__arm64__)
        case CPU_TYPE_ARM64:
            base_address = SHARED_REGION_BASE_ARM64;
            size = SHARED_REGION_SIZE_ARM64;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM64;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM64;
            break;
#elif !defined(__arm__)
        case CPU_TYPE_I386:
            base_address = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
            break;
        case CPU_TYPE_POWERPC:
            base_address = SHARED_REGION_BASE_PPC64;
            size = SHARED_REGION_SIZE_PPC64;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
            break;
#endif
        default:
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: create: unknown cpu type %d\n",
                cputype));
            kfree(shared_region, sizeof(*shared_region));
            shared_region = NULL;
            goto done;
        }
    } else {
        switch (cputype) {
#if defined(__arm__) || defined(__arm64__)
        case CPU_TYPE_ARM:
            base_address = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
            break;
#else
        case CPU_TYPE_I386:
            base_address = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
            break;
        case CPU_TYPE_POWERPC:
            base_address = SHARED_REGION_BASE_PPC;
            size = SHARED_REGION_SIZE_PPC;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
            break;
#endif
        default:
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: create: unknown cpu type %d\n",
                cputype));
            kfree(shared_region, sizeof(*shared_region));
            shared_region = NULL;
            goto done;
        }
    }

    /* create a memory entry structure and a Mach port handle */
    kr = mach_memory_entry_allocate(&mem_entry,
        &mem_entry_port);
    if (kr != KERN_SUCCESS) {
        kfree(shared_region, sizeof(*shared_region));
        shared_region = NULL;
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: create: "
            "couldn't allocate mem_entry\n"));
        goto done;
    }

#if defined(__arm__) || defined(__arm64__)
    {
        struct pmap *pmap_nested;

        pmap_nested = pmap_create_options(NULL, 0, is_64bit ? PMAP_CREATE_64BIT : 0);
        if (pmap_nested != PMAP_NULL) {
            pmap_set_nested(pmap_nested);
            sub_map = vm_map_create(pmap_nested, 0, size, TRUE);
#if defined(__arm64__)
            if (is_64bit ||
                page_shift_user32 == SIXTEENK_PAGE_SHIFT) {
                /* enforce 16KB alignment of VM map entries */
                vm_map_set_page_shift(sub_map,
                    SIXTEENK_PAGE_SHIFT);
            }
#elif (__ARM_ARCH_7K__ >= 2)
            /* enforce 16KB alignment for watch targets with new ABI */
            vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT);
#endif /* __arm64__ */
        } else {
            sub_map = VM_MAP_NULL;
        }
    }
#else
    /* create a VM sub map and its pmap */
    sub_map = vm_map_create(pmap_create_options(NULL, 0, is_64bit),
        0, size,
        TRUE);
#endif
    if (sub_map == VM_MAP_NULL) {
        ipc_port_release_send(mem_entry_port);
        kfree(shared_region, sizeof(*shared_region));
        shared_region = NULL;
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: create: "
            "couldn't allocate map\n"));
        goto done;
    }

    assert(!sub_map->disable_vmentry_reuse);
    sub_map->is_nested_map = TRUE;

    /* make the memory entry point to the VM sub map */
    mem_entry->is_sub_map = TRUE;
    mem_entry->backing.map = sub_map;
    mem_entry->size = size;
    mem_entry->protection = VM_PROT_ALL;

    /* make the shared region point at the memory entry */
    shared_region->sr_mem_entry = mem_entry_port;

    /* fill in the shared region's environment and settings */
    shared_region->sr_base_address = base_address;
    shared_region->sr_size = size;
    shared_region->sr_pmap_nesting_start = pmap_nesting_start;
    shared_region->sr_pmap_nesting_size = pmap_nesting_size;
    shared_region->sr_cpu_type = cputype;
    shared_region->sr_cpu_subtype = cpu_subtype;
    shared_region->sr_64bit = is_64bit;
    shared_region->sr_root_dir = root_dir;

    queue_init(&shared_region->sr_q);
    shared_region->sr_mapping_in_progress = FALSE;
    shared_region->sr_slide_in_progress = FALSE;
    shared_region->sr_persists = FALSE;
    shared_region->sr_slid = FALSE;
    shared_region->sr_timer_call = NULL;
    shared_region->sr_first_mapping = (mach_vm_offset_t) -1;

    /* grab a reference for the caller */
    shared_region->sr_ref_count = 1;

    /* And set up slide info */
    si = &shared_region->sr_slide_info;
    si->start = 0;
    si->end = 0;
    si->slide = 0;
#if defined(HAS_APPLE_PAC)
    si->si_ptrauth = FALSE; /* no pointer authentication by default */
#endif /* HAS_APPLE_PAC */
    si->slide_object = NULL;
    si->slide_info_size = 0;
    si->slide_info_entry = NULL;

    /* Initialize UUID and other metadata */
    memset(&shared_region->sr_uuid, '\0', sizeof(shared_region->sr_uuid));
    shared_region->sr_uuid_copied = FALSE;
    shared_region->sr_images_count = 0;
    shared_region->sr_images = NULL;

done:
    if (shared_region) {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,"
            "base=0x%llx,size=0x%llx) <- "
            "%p mem=(%p,%p) map=%p pmap=%p\n",
            (void *)VM_KERNEL_ADDRPERM(root_dir),
            cputype, cpu_subtype, is_64bit,
            (long long)base_address,
            (long long)size,
            (void *)VM_KERNEL_ADDRPERM(shared_region),
            (void *)VM_KERNEL_ADDRPERM(mem_entry_port),
            (void *)VM_KERNEL_ADDRPERM(mem_entry),
            (void *)VM_KERNEL_ADDRPERM(sub_map),
            (void *)VM_KERNEL_ADDRPERM(sub_map->pmap)));
    } else {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,"
            "base=0x%llx,size=0x%llx) <- NULL",
            (void *)VM_KERNEL_ADDRPERM(root_dir),
            cputype, cpu_subtype, is_64bit,
            (long long)base_address,
            (long long)size));
    }
    return shared_region;
}
/*
 * Destroy a now-unused shared region.
 * The shared region is no longer in the queue and can not be looked up.
 */
static void
vm_shared_region_destroy(
    vm_shared_region_t      shared_region)
{
    vm_named_entry_t        mem_entry;
    vm_map_t                map;
    vm_shared_region_slide_info_t si;

    SHARED_REGION_TRACE_INFO(
        ("shared_region: -> destroy(%p) (root=%p,cpu=<%d,%d>,64bit=%d)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir),
        shared_region->sr_cpu_type,
        shared_region->sr_cpu_subtype,
        shared_region->sr_64bit));

    assert(shared_region->sr_ref_count == 0);
    assert(!shared_region->sr_persists);
    assert(!shared_region->sr_slid);

    mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
    assert(mem_entry->is_sub_map);
    assert(!mem_entry->internal);
    assert(!mem_entry->is_copy);
    map = mem_entry->backing.map;

    /*
     * Clean up the pmap first. The virtual addresses that were
     * entered in this possibly "nested" pmap may have different values
     * than the VM map's min and max offsets, if the VM sub map was
     * mapped at a non-zero offset in the processes' main VM maps, which
     * is usually the case, so the clean-up we do in vm_map_destroy() would
     * not be enough here.
     */
    pmap_remove(map->pmap,
        shared_region->sr_base_address,
        (shared_region->sr_base_address +
        shared_region->sr_size));

    /*
     * Release our (one and only) handle on the memory entry.
     * This will generate a no-senders notification, which will be processed
     * by ipc_kobject_notify(), which will release the one and only
     * reference on the memory entry and cause it to be destroyed, along
     * with the VM sub map and its pmap.
     */
    mach_memory_entry_port_release(shared_region->sr_mem_entry);
    shared_region->sr_mem_entry = IPC_PORT_NULL;

    if (shared_region->sr_timer_call) {
        thread_call_free(shared_region->sr_timer_call);
    }

    /*
     * If slid, free those resources. We'll want this eventually,
     * but can't handle it properly today.
     */
    si = &shared_region->sr_slide_info;
    if (si->slide_info_entry) {
        kmem_free(kernel_map,
            (vm_offset_t) si->slide_info_entry,
            (vm_size_t) si->slide_info_size);
        vm_object_deallocate(si->slide_object);
    }

    /* release the shared region structure... */
    kfree(shared_region, sizeof(*shared_region));

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: destroy(%p) <-\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    shared_region = NULL;
}
/*
 * Gets the address of the first (in time) mapping in the shared region.
 */
kern_return_t
vm_shared_region_start_address(
    vm_shared_region_t      shared_region,
    mach_vm_offset_t        *start_address)
{
    kern_return_t           kr;
    mach_vm_offset_t        sr_base_address;
    mach_vm_offset_t        sr_first_mapping;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> start_address(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 1);

    vm_shared_region_lock();

    /*
     * Wait if there's another thread establishing a mapping
     * in this shared region right when we're looking at it.
     * We want a consistent view of the map...
     */
    while (shared_region->sr_mapping_in_progress) {
        /* wait for our turn... */
        assert(shared_region->sr_ref_count > 1);
        vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
            THREAD_UNINT);
    }
    assert(!shared_region->sr_mapping_in_progress);
    assert(shared_region->sr_ref_count > 1);

    sr_base_address = shared_region->sr_base_address;
    sr_first_mapping = shared_region->sr_first_mapping;

    if (sr_first_mapping == (mach_vm_offset_t) -1) {
        /* shared region is empty */
        kr = KERN_INVALID_ADDRESS;
    } else {
        kr = KERN_SUCCESS;
        *start_address = sr_base_address + sr_first_mapping;
    }

    vm_shared_region_unlock();

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: start_address(%p) <- 0x%llx\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        (long long)shared_region->sr_base_address));

    return kr;
}
void
vm_shared_region_undo_mappings(
    vm_map_t                        sr_map,
    mach_vm_offset_t                sr_base_address,
    struct shared_file_mapping_np   *mappings,
    unsigned int                    mappings_count)
{
    unsigned int            j;
    vm_shared_region_t      shared_region = NULL;
    boolean_t               reset_shared_region_state = FALSE;

    shared_region = vm_shared_region_get(current_task());
    if (shared_region == NULL) {
        printf("Failed to undo mappings because of NULL shared region.\n");
        return;
    }

    if (sr_map == NULL) {
        ipc_port_t              sr_handle;
        vm_named_entry_t        sr_mem_entry;

        vm_shared_region_lock();
        assert(shared_region->sr_ref_count > 1);

        while (shared_region->sr_mapping_in_progress) {
            /* wait for our turn... */
            vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
                THREAD_UNINT);
        }
        assert(!shared_region->sr_mapping_in_progress);
        assert(shared_region->sr_ref_count > 1);
        /* let others know we're working in this shared region */
        shared_region->sr_mapping_in_progress = TRUE;

        vm_shared_region_unlock();

        reset_shared_region_state = TRUE;

        /* no need to lock because this data is never modified... */
        sr_handle = shared_region->sr_mem_entry;
        sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
        sr_map = sr_mem_entry->backing.map;
        sr_base_address = shared_region->sr_base_address;
    }
    /*
     * Undo the mappings we've established so far.
     */
    for (j = 0; j < mappings_count; j++) {
        kern_return_t kr2;

        if (mappings[j].sfm_size == 0) {
            /*
             * We didn't establish this
             * mapping, so nothing to undo.
             */
            continue;
        }
        SHARED_REGION_TRACE_INFO(
            ("shared_region: mapping[%d]: "
            "address:0x%016llx "
            "size:0x%016llx "
            "offset:0x%016llx "
            "maxprot:0x%x prot:0x%x: "
            "undoing...\n",
            j,
            (long long)mappings[j].sfm_address,
            (long long)mappings[j].sfm_size,
            (long long)mappings[j].sfm_file_offset,
            mappings[j].sfm_max_prot,
            mappings[j].sfm_init_prot));
        kr2 = mach_vm_deallocate(
            sr_map,
            (mappings[j].sfm_address -
            sr_base_address),
            mappings[j].sfm_size);
        assert(kr2 == KERN_SUCCESS);
    }

    if (reset_shared_region_state) {
        vm_shared_region_lock();
        assert(shared_region->sr_ref_count > 1);
        assert(shared_region->sr_mapping_in_progress);
        /* we're done working on that shared region */
        shared_region->sr_mapping_in_progress = FALSE;
        thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
        vm_shared_region_unlock();
        reset_shared_region_state = FALSE;
    }

    vm_shared_region_deallocate(shared_region);
}
/*
 * Establish some mappings of a file in the shared region.
 * This is used by "dyld" via the shared_region_map_np() system call
 * to populate the shared region with the appropriate shared cache.
 *
 * One could also call it several times to incrementally load several
 * libraries, as long as they do not overlap.
 * It will return KERN_SUCCESS if the mappings were successfully established
 * or if they were already established identically by another process.
 */
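/*
 * Illustrative sketch (not from this file): the caller describes the shared
 * cache as an array of shared_file_mapping_np entries, one per segment.
 * The field names match the structure as used below; the values are made up.
 *
 *     struct shared_file_mapping_np text_seg = {
 *         .sfm_address     = 0x7fff20000000ULL,   // hypothetical base
 *         .sfm_size        = 0x20000000,
 *         .sfm_file_offset = 0,
 *         .sfm_max_prot    = VM_PROT_READ | VM_PROT_EXECUTE,
 *         .sfm_init_prot   = VM_PROT_READ | VM_PROT_EXECUTE,
 *     };
 *     // dyld passes such an array to shared_region_map_np(), which lands
 *     // in vm_shared_region_map_file() below.
 */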
kern_return_t
vm_shared_region_map_file(
    vm_shared_region_t              shared_region,
    unsigned int                    mappings_count,
    struct shared_file_mapping_np   *mappings,
    memory_object_control_t         file_control,
    memory_object_size_t            file_size,
    void                            *root_dir,
    uint32_t                        slide,
    user_addr_t                     slide_start,
    user_addr_t                     slide_size)
{
    kern_return_t           kr;
    vm_object_t             file_object;
    ipc_port_t              sr_handle;
    vm_named_entry_t        sr_mem_entry;
    vm_map_t                sr_map;
    mach_vm_offset_t        sr_base_address;
    unsigned int            i;
    mach_port_t             map_port;
    vm_map_offset_t         target_address;
    vm_object_t             object;
    vm_object_size_t        obj_size;
    struct shared_file_mapping_np   *mapping_to_slide = NULL;
    mach_vm_offset_t        first_mapping = (mach_vm_offset_t) -1;
    mach_vm_offset_t        slid_mapping = (mach_vm_offset_t) -1;
    vm_map_offset_t         lowest_unnestable_addr = 0;
    vm_map_kernel_flags_t   vmk_flags;
    mach_vm_offset_t        sfm_min_address = ~0;
    mach_vm_offset_t        sfm_max_address = 0;
    mach_vm_offset_t        sfm_end;
    struct _dyld_cache_header sr_cache_header;

#if __arm64__
    if ((shared_region->sr_64bit ||
        page_shift_user32 == SIXTEENK_PAGE_SHIFT) &&
        ((slide & SIXTEENK_PAGE_MASK) != 0)) {
        printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n",
            __FUNCTION__, slide);
        kr = KERN_INVALID_ARGUMENT;
        goto done;
    }
#endif /* __arm64__ */

    kr = KERN_SUCCESS;

    vm_shared_region_lock();
    assert(shared_region->sr_ref_count > 1);

    if (shared_region->sr_root_dir != root_dir) {
        /*
         * This shared region doesn't match the current root
         * directory of this process.  Deny the mapping to
         * avoid tainting the shared region with something that
         * doesn't quite belong into it.
         */
        vm_shared_region_unlock();
        kr = KERN_PROTECTION_FAILURE;
        goto done;
    }

    /*
     * Make sure we handle only one mapping at a time in a given
     * shared region, to avoid race conditions.  This should not
     * happen frequently...
     */
    while (shared_region->sr_mapping_in_progress) {
        /* wait for our turn... */
        vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
            THREAD_UNINT);
    }
    assert(!shared_region->sr_mapping_in_progress);
    assert(shared_region->sr_ref_count > 1);
    /* let others know we're working in this shared region */
    shared_region->sr_mapping_in_progress = TRUE;

    vm_shared_region_unlock();

    /* no need to lock because this data is never modified... */
    sr_handle = shared_region->sr_mem_entry;
    sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
    sr_map = sr_mem_entry->backing.map;
    sr_base_address = shared_region->sr_base_address;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
        (void *)VM_KERNEL_ADDRPERM(mappings),
        (void *)VM_KERNEL_ADDRPERM(file_control), file_size));

    /* get the VM object associated with the file to be mapped */
    file_object = memory_object_control_to_vm_object(file_control);

    assert(file_object);

    /* establish the mappings */
    for (i = 0; i < mappings_count; i++) {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: mapping[%d]: "
            "address:0x%016llx size:0x%016llx offset:0x%016llx "
            "maxprot:0x%x prot:0x%x\n",
            i,
            (long long)mappings[i].sfm_address,
            (long long)mappings[i].sfm_size,
            (long long)mappings[i].sfm_file_offset,
            mappings[i].sfm_max_prot,
            mappings[i].sfm_init_prot));

        if (mappings[i].sfm_address < sfm_min_address) {
            sfm_min_address = mappings[i].sfm_address;
        }

        if (os_add_overflow(mappings[i].sfm_address,
            mappings[i].sfm_size,
            &sfm_end) ||
            (vm_map_round_page(sfm_end, VM_MAP_PAGE_MASK(sr_map)) <
            mappings[i].sfm_address)) {
            /* overflow */
            kr = KERN_INVALID_ARGUMENT;
            break;
        }
        if (sfm_end > sfm_max_address) {
            sfm_max_address = sfm_end;
        }

        if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
            /* zero-filled memory */
            map_port = MACH_PORT_NULL;
        } else {
            /* file-backed memory */
            __IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager);
        }

        if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) {
            /*
             * This is the mapping that needs to be slid.
             */
            if (mapping_to_slide != NULL) {
                SHARED_REGION_TRACE_INFO(
                    ("shared_region: mapping[%d]: "
                    "address:0x%016llx size:0x%016llx "
                    "offset:0x%016llx "
                    "maxprot:0x%x prot:0x%x "
                    "will not be slid as only one such mapping is allowed...\n",
                    i,
                    (long long)mappings[i].sfm_address,
                    (long long)mappings[i].sfm_size,
                    (long long)mappings[i].sfm_file_offset,
                    mappings[i].sfm_max_prot,
                    mappings[i].sfm_init_prot));
            } else {
                mapping_to_slide = &mappings[i];
            }
        }

        /* mapping's address is relative to the shared region base */
        target_address =
            mappings[i].sfm_address - sr_base_address;

        vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
        vmk_flags.vmkf_already = TRUE;
        /* no copy-on-read for mapped binaries */
        vmk_flags.vmkf_no_copy_on_read = 1;

        /* establish that mapping, OK if it's "already" there */
        if (map_port == MACH_PORT_NULL) {
            /*
             * We want to map some anonymous memory in a
             * shared region.
             * We have to create the VM object now, so that it
             * can be mapped "copy-on-write".
             */
            obj_size = vm_map_round_page(mappings[i].sfm_size,
                VM_MAP_PAGE_MASK(sr_map));
            object = vm_object_allocate(obj_size);
            if (object == VM_OBJECT_NULL) {
                kr = KERN_RESOURCE_SHORTAGE;
            } else {
                kr = vm_map_enter(
                    sr_map,
                    &target_address,
                    vm_map_round_page(mappings[i].sfm_size,
                    VM_MAP_PAGE_MASK(sr_map)),
                    0,
                    VM_FLAGS_FIXED,
                    vmk_flags,
                    VM_KERN_MEMORY_NONE,
                    object,
                    0,
                    TRUE,
                    mappings[i].sfm_init_prot & VM_PROT_ALL,
                    mappings[i].sfm_max_prot & VM_PROT_ALL,
                    VM_INHERIT_DEFAULT);
            }
        } else {
            object = VM_OBJECT_NULL; /* no anonymous memory here */
            kr = vm_map_enter_mem_object(
                sr_map,
                &target_address,
                vm_map_round_page(mappings[i].sfm_size,
                VM_MAP_PAGE_MASK(sr_map)),
                0,
                VM_FLAGS_FIXED,
                vmk_flags,
                VM_KERN_MEMORY_NONE,
                map_port,
                mappings[i].sfm_file_offset,
                TRUE,
                mappings[i].sfm_init_prot & VM_PROT_ALL,
                mappings[i].sfm_max_prot & VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
        }

        if (kr == KERN_SUCCESS) {
            /*
             * Record the first (chronologically) successful
             * mapping in this shared region.
             * We're protected by "sr_mapping_in_progress" here,
             * so no need to lock "shared_region".
             */
            if (first_mapping == (mach_vm_offset_t) -1) {
                first_mapping = target_address;
            }

#if defined(HAS_APPLE_PAC)
            /*
             * Set "sr_slid_mapping"
             * it is used to get the userland address for address authentication.
             */
#endif
            if ((slid_mapping == (mach_vm_offset_t) -1) &&
                (mapping_to_slide == &mappings[i])) {
                slid_mapping = target_address;
            }

            /*
             * Record the lowest writable address in this
             * sub map, to log any unexpected unnesting below
             * that address (see log_unnest_badness()).
             */
            if ((mappings[i].sfm_init_prot & VM_PROT_WRITE) &&
                sr_map->is_nested_map &&
                (lowest_unnestable_addr == 0 ||
                (target_address < lowest_unnestable_addr))) {
                lowest_unnestable_addr = target_address;
            }
        } else {
            if (map_port == MACH_PORT_NULL) {
                /*
                 * Get rid of the VM object we just created
                 * but failed to map.
                 */
                vm_object_deallocate(object);
                object = VM_OBJECT_NULL;
            }
            if (kr == KERN_MEMORY_PRESENT) {
                /*
                 * This exact mapping was already there:
                 * that's fine.
                 */
                SHARED_REGION_TRACE_INFO(
                    ("shared_region: mapping[%d]: "
                    "address:0x%016llx size:0x%016llx "
                    "offset:0x%016llx "
                    "maxprot:0x%x prot:0x%x "
                    "already mapped...\n",
                    i,
                    (long long)mappings[i].sfm_address,
                    (long long)mappings[i].sfm_size,
                    (long long)mappings[i].sfm_file_offset,
                    mappings[i].sfm_max_prot,
                    mappings[i].sfm_init_prot));
                /*
                 * We didn't establish this mapping ourselves;
                 * let's reset its size, so that we do not
                 * attempt to undo it if an error occurs later.
                 */
                mappings[i].sfm_size = 0;
                kr = KERN_SUCCESS;
            } else {
                break;
            }
        }
    }

    if (kr != KERN_SUCCESS) {
        /* the last mapping we tried (mappings[i]) failed ! */
        assert(i < mappings_count);
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: mapping[%d]: "
            "address:0x%016llx size:0x%016llx "
            "offset:0x%016llx "
            "maxprot:0x%x prot:0x%x failed 0x%x\n",
            i,
            (long long)mappings[i].sfm_address,
            (long long)mappings[i].sfm_size,
            (long long)mappings[i].sfm_file_offset,
            mappings[i].sfm_max_prot,
            mappings[i].sfm_init_prot,
            kr));
        /* undo all the previous mappings */
        vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i);
    }

    if (kr == KERN_SUCCESS &&
        slide_size != 0 &&
        mapping_to_slide != NULL) {
        kr = vm_shared_region_slide(slide,
            mapping_to_slide->sfm_file_offset,
            mapping_to_slide->sfm_size,
            slide_start,
            slide_size,
            slid_mapping,
            file_control);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: region_slide("
                "slide:0x%x start:0x%016llx "
                "size:0x%016llx) failed 0x%x\n",
                slide,
                (long long)slide_start,
                (long long)slide_size,
                kr));
            vm_shared_region_undo_mappings(sr_map,
                sr_base_address,
                mappings,
                mappings_count);
        }
    }

    if (kr == KERN_SUCCESS) {
        /* adjust the map's "lowest_unnestable_start" */
        lowest_unnestable_addr &= ~(pmap_nesting_size_min - 1);
        if (lowest_unnestable_addr !=
            sr_map->lowest_unnestable_start) {
            vm_map_lock(sr_map);
            sr_map->lowest_unnestable_start =
                lowest_unnestable_addr;
            vm_map_unlock(sr_map);
        }
    }

    vm_shared_region_lock();
    assert(shared_region->sr_ref_count > 1);
    assert(shared_region->sr_mapping_in_progress);

    /* set "sr_first_mapping"; dyld uses it to validate the shared cache */
    if (kr == KERN_SUCCESS &&
        shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
        shared_region->sr_first_mapping = first_mapping;
    }

    /*
     * copy in the shared region UUID to the shared region structure.
     * we do this indirectly by first copying in the shared cache header
     * and then copying the UUID from there because we'll need to look
     * at other content from the shared cache header.
     */
    if (kr == KERN_SUCCESS && !shared_region->sr_uuid_copied) {
        int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping),
            (char *)&sr_cache_header,
            sizeof(sr_cache_header));
        if (error == 0) {
            memcpy(&shared_region->sr_uuid, &sr_cache_header.uuid, sizeof(shared_region->sr_uuid));
            shared_region->sr_uuid_copied = TRUE;
        } else {
#if DEVELOPMENT || DEBUG
            panic("shared_region: copyin shared_cache_header(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
                "offset:0 size:0x%016llx) failed with %d\n",
                (long long)shared_region->sr_base_address,
                (long long)shared_region->sr_first_mapping,
                (long long)sizeof(sr_cache_header),
                error);
#endif /* DEVELOPMENT || DEBUG */
            shared_region->sr_uuid_copied = FALSE;
        }
    }

    /*
     * If the shared cache is associated with the init task (and is therefore the system shared cache),
     * check whether it is a custom built shared cache and copy in the shared cache layout accordingly.
     */
    boolean_t is_init_task = (task_pid(current_task()) == 1);
    if (shared_region->sr_uuid_copied && is_init_task) {
        /* Copy in the shared cache layout if we're running with a locally built shared cache */
        if (sr_cache_header.locallyBuiltCache) {
            KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_START);
            size_t image_array_length = (sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info));
            struct _dyld_cache_image_text_info *sr_image_layout = kalloc(image_array_length);
            int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping +
                sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length);
            if (error == 0) {
                shared_region->sr_images = kalloc(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64));
                for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) {
                    memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid,
                        sizeof(shared_region->sr_images[index].imageUUID));
                    shared_region->sr_images[index].imageLoadAddress = sr_image_layout[index].loadAddress;
                }

                assert(sr_cache_header.imagesTextCount < UINT32_MAX);
                shared_region->sr_images_count = (uint32_t) sr_cache_header.imagesTextCount;
            } else {
#if DEVELOPMENT || DEBUG
                panic("shared_region: copyin shared_cache_layout(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
                    "offset:0x%016llx size:0x%016llx) failed with %d\n",
                    (long long)shared_region->sr_base_address,
                    (long long)shared_region->sr_first_mapping,
                    (long long)sr_cache_header.imagesTextOffset,
                    (long long)image_array_length,
                    error);
#endif /* DEVELOPMENT || DEBUG */
            }
            KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count);
            kfree(sr_image_layout, image_array_length);
            sr_image_layout = NULL;
        }
        init_task_shared_region = shared_region;
    }

    if (kr == KERN_SUCCESS) {
        /*
         * If we succeeded, we know the bounds of the shared region.
         * Trim our pmaps to only cover this range (if applicable to
         * this platform).
         */
        pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_min_address, sfm_max_address - sfm_min_address);
    }

    /* we're done working on that shared region */
    shared_region->sr_mapping_in_progress = FALSE;
    thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
    vm_shared_region_unlock();

done:
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n",
        (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
        (void *)VM_KERNEL_ADDRPERM(mappings),
        (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr));
    return kr;
}
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 *
 * This also tries to trim the pmap for the shared region.
 */
vm_shared_region_t
vm_shared_region_trim_and_get(task_t task)
{
    vm_shared_region_t      shared_region;
    ipc_port_t              sr_handle;
    vm_named_entry_t        sr_mem_entry;
    vm_map_t                sr_map;

    /* Get the shared region and the map. */
    shared_region = vm_shared_region_get(task);
    if (shared_region == NULL) {
        return NULL;
    }

    sr_handle = shared_region->sr_mem_entry;
    sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
    sr_map = sr_mem_entry->backing.map;

    /* Trim the pmap if possible. */
    pmap_trim(task->map->pmap, sr_map->pmap, 0, 0, 0);

    return shared_region;
}
/*
 * Enter the appropriate shared region into "map" for "task".
 * This involves looking up the shared region (and possibly creating a new
 * one) for the desired environment, then mapping the VM sub map into the
 * task's VM "map", with the appropriate level of pmap-nesting.
 */
kern_return_t
vm_shared_region_enter(
    struct _vm_map          *map,
    struct task             *task,
    boolean_t               is_64bit,
    void                    *fsroot,
    cpu_type_t              cpu,
    cpu_subtype_t           cpu_subtype)
{
    kern_return_t           kr;
    vm_shared_region_t      shared_region;
    vm_map_offset_t         sr_address, sr_offset, target_address;
    vm_map_size_t           sr_size, mapping_size;
    vm_map_offset_t         sr_pmap_nesting_start;
    vm_map_size_t           sr_pmap_nesting_size;
    ipc_port_t              sr_handle;
    vm_prot_t               cur_prot, max_prot;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> "
        "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d)\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(fsroot),
        cpu, cpu_subtype, is_64bit));

    /* lookup (create if needed) the shared region for this environment */
    shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit);
    if (shared_region == NULL) {
        /* this should not happen ! */
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: -> "
            "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d): "
            "lookup failed !\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit));
        //panic("shared_region_enter: lookup failed\n");
        return KERN_FAILURE;
    }

    /* no need to lock since this data is never modified */
    sr_address = shared_region->sr_base_address;
    sr_size = shared_region->sr_size;
    sr_handle = shared_region->sr_mem_entry;
    sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start;
    sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size;

    cur_prot = VM_PROT_READ;
#if __x86_64__
    /*
     * XXX BINARY COMPATIBILITY
     * java6 apparently needs to modify some code in the
     * dyld shared cache and needs to be allowed to add
     * write access...
     */
    max_prot = VM_PROT_ALL;
#else /* __x86_64__ */
    max_prot = VM_PROT_READ;
#endif /* __x86_64__ */
    /*
     * Start mapping the shared region's VM sub map into the task's VM map.
     */
    sr_offset = 0;

    if (sr_pmap_nesting_start > sr_address) {
        /* we need to map a range without pmap-nesting first */
        target_address = sr_address;
        mapping_size = sr_pmap_nesting_start - sr_address;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_NONE,
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
                "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (void *)VM_KERNEL_ADDRPERM(task),
                (void *)VM_KERNEL_ADDRPERM(fsroot),
                cpu, cpu_subtype, is_64bit,
                (long long)target_address,
                (long long)mapping_size,
                (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
            "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit,
            (long long)target_address, (long long)mapping_size,
            (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
        sr_offset += mapping_size;
        sr_size -= mapping_size;
    }
    /*
     * We may need to map several pmap-nested portions, due to platform
     * specific restrictions on pmap nesting.
     * The pmap-nesting is triggered by the "vmkf_nested_pmap" flag...
     */
    for (;
        sr_pmap_nesting_size > 0;
        sr_offset += mapping_size,
        sr_size -= mapping_size,
        sr_pmap_nesting_size -= mapping_size) {
        vm_map_kernel_flags_t vmk_flags;

        target_address = sr_address + sr_offset;
        mapping_size = sr_pmap_nesting_size;
        if (mapping_size > pmap_nesting_size_max) {
            mapping_size = (vm_map_offset_t) pmap_nesting_size_max;
        }
        vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
        vmk_flags.vmkf_nested_pmap = TRUE;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            vmk_flags,
            VM_MEMORY_SHARED_PMAP,
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
                "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (void *)VM_KERNEL_ADDRPERM(task),
                (void *)VM_KERNEL_ADDRPERM(fsroot),
                cpu, cpu_subtype, is_64bit,
                (long long)target_address,
                (long long)mapping_size,
                (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
            "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit,
            (long long)target_address, (long long)mapping_size,
            (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
    }
    if (sr_size > 0) {
        /* and there's some left to be mapped without pmap-nesting */
        target_address = sr_address + sr_offset;
        mapping_size = sr_size;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_NONE,
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
                "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (void *)VM_KERNEL_ADDRPERM(task),
                (void *)VM_KERNEL_ADDRPERM(fsroot),
                cpu, cpu_subtype, is_64bit,
                (long long)target_address,
                (long long)mapping_size,
                (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
            "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit,
            (long long)target_address, (long long)mapping_size,
            (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
        sr_offset += mapping_size;
        sr_size -= mapping_size;
    }
    assert(sr_size == 0);

done:
    if (kr == KERN_SUCCESS) {
        /* let the task use that shared region */
        vm_shared_region_set(task, shared_region);
    } else {
        /* drop our reference since we're not using it */
        vm_shared_region_deallocate(shared_region);
        vm_shared_region_set(task, NULL);
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: enter(%p,%p,%p,%d,%d,%d) <- 0x%x\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(fsroot),
        cpu, cpu_subtype, is_64bit, kr));
    return kr;
}
#define SANE_SLIDE_INFO_SIZE            (2560*1024) /*Can be changed if needed*/
struct vm_shared_region_slide_info      slide_info;

kern_return_t
vm_shared_region_sliding_valid(uint32_t slide)
{
    kern_return_t kr = KERN_SUCCESS;
    vm_shared_region_t sr = vm_shared_region_get(current_task());

    /* No region yet? we're fine. */
    if (sr == NULL) {
        return kr;
    }

    if ((sr->sr_slid == TRUE) && slide) {
        if (slide != vm_shared_region_get_slide_info(sr)->slide) {
            printf("Only one shared region can be slid\n");
            kr = KERN_FAILURE;
        } else {
            /*
             * Request for sliding when we've
             * already done it with exactly the
             * same slide value before.
             * This isn't wrong technically but
             * we don't want to slide again and
             * so we return this value.
             */
            kr = KERN_INVALID_ARGUMENT;
        }
    }
    vm_shared_region_deallocate(sr);
    return kr;
}
kern_return_t
vm_shared_region_slide_mapping(
    vm_shared_region_t      sr,
    mach_vm_size_t          slide_info_size,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    mach_vm_offset_t        slid_mapping,
    uint32_t                slide,
    memory_object_control_t sr_file_control)
{
    kern_return_t                 kr;
    vm_object_t                   object;
    vm_shared_region_slide_info_t si;
    vm_offset_t                   slide_info_entry;
    vm_map_entry_t                slid_entry, tmp_entry;
    struct vm_map_entry           tmp_entry_store;
    memory_object_t               sr_pager;
    vm_map_t                      sr_map;
    int                           vm_flags;
    vm_map_kernel_flags_t         vmk_flags;
    vm_map_offset_t               map_addr;

    tmp_entry = VM_MAP_ENTRY_NULL;
    sr_pager = MEMORY_OBJECT_NULL;
    object = VM_OBJECT_NULL;
    slide_info_entry = 0;

    assert(sr->sr_slide_in_progress);
    assert(!sr->sr_slid);

    si = vm_shared_region_get_slide_info(sr);
    assert(si->slide_object == VM_OBJECT_NULL);
    assert(si->slide_info_entry == NULL);

    if (sr_file_control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }
    if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
        printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
        return KERN_FAILURE;
    }

    kr = kmem_alloc(kernel_map,
        (vm_offset_t *) &slide_info_entry,
        (vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    object = memory_object_control_to_vm_object(sr_file_control);
    if (object == VM_OBJECT_NULL || object->internal) {
        object = VM_OBJECT_NULL;
        kr = KERN_INVALID_ADDRESS;
        goto done;
    }

    vm_object_lock(object);
    vm_object_reference_locked(object); /* for si->slide_object */
    object->object_is_shared_cache = TRUE;
    vm_object_unlock(object);

    si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry;
    si->slide_info_size = slide_info_size;

    assert(slid_mapping != (mach_vm_offset_t) -1);
    si->slid_address = slid_mapping + sr->sr_base_address;
    si->slide_object = object;
    si->start = start;
    si->end = si->start + size;
    si->slide = slide;
#if defined(HAS_APPLE_PAC)
    if (sr->sr_cpu_type == CPU_TYPE_ARM64 &&
        sr->sr_cpu_subtype == CPU_SUBTYPE_ARM64E) {
        /* arm64e has pointer authentication */
        si->si_ptrauth = TRUE;
    }
#endif /* HAS_APPLE_PAC */

    /* find the shared region's map entry to slide */
    sr_map = vm_shared_region_vm_map(sr);
    vm_map_lock_read(sr_map);
    if (!vm_map_lookup_entry(sr_map,
        slid_mapping,
        &slid_entry)) {
        /* no mapping there */
        vm_map_unlock(sr_map);
        kr = KERN_INVALID_ARGUMENT;
        goto done;
    }
    /*
     * We might want to clip the entry to cover only the portion that
     * needs sliding (offsets si->start to si->end in the shared cache
     * file at the bottom of the shadow chain).
     * In practice, it seems to cover the entire DATA segment...
     */
    tmp_entry_store = *slid_entry;
    tmp_entry = &tmp_entry_store;
    slid_entry = VM_MAP_ENTRY_NULL;
    /* extra ref to keep object alive while map is unlocked */
    vm_object_reference(VME_OBJECT(tmp_entry));
    vm_map_unlock_read(sr_map);

    /* create a "shared_region" sliding pager */
    sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry),
        VME_OFFSET(tmp_entry),
        si);
    if (sr_pager == NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto done;
    }

    /* map that pager over the portion of the mapping that needs sliding */
    vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    vmk_flags.vmkf_overwrite_immutable = TRUE;
    map_addr = tmp_entry->vme_start;
    kr = vm_map_enter_mem_object(sr_map,
        &map_addr,
        (tmp_entry->vme_end -
        tmp_entry->vme_start),
        (mach_vm_offset_t) 0,
        vm_flags,
        vmk_flags,
        VM_KERN_MEMORY_NONE,
        (ipc_port_t)(uintptr_t) sr_pager,
        0,
        TRUE,
        tmp_entry->protection,
        tmp_entry->max_protection,
        tmp_entry->inheritance);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x\n", kr);
    assertf(map_addr == tmp_entry->vme_start,
        "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n",
        (uint64_t)map_addr,
        (uint64_t) tmp_entry->vme_start,
        tmp_entry);

    /* success! */
    kr = KERN_SUCCESS;

done:
    if (sr_pager) {
        /*
         * Release the sr_pager reference obtained by
         * shared_region_pager_setup().
         * The mapping (if it succeeded) is now holding a reference on
         * the memory object.
         */
        memory_object_deallocate(sr_pager);
        sr_pager = MEMORY_OBJECT_NULL;
    }
    if (tmp_entry) {
        /* release extra ref on tmp_entry's VM object */
        vm_object_deallocate(VME_OBJECT(tmp_entry));
        tmp_entry = VM_MAP_ENTRY_NULL;
    }

    if (kr != KERN_SUCCESS) {
        /* cleanup */
        if (slide_info_entry) {
            kmem_free(kernel_map, slide_info_entry, slide_info_size);
            slide_info_entry = 0;
        }
        if (si->slide_object) {
            vm_object_deallocate(si->slide_object);
            si->slide_object = VM_OBJECT_NULL;
        }
    }
    return kr;
}
void*
vm_shared_region_get_slide_info_entry(vm_shared_region_t sr)
{
    return (void*)sr->sr_slide_info.slide_info_entry;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v1(vm_shared_region_slide_info_entry_v1_t s_info)
{
    uint32_t pageIndex = 0;
    uint16_t entryIndex = 0;
    uint16_t *toc = NULL;

    toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
    for (; pageIndex < s_info->toc_count; pageIndex++) {
        entryIndex = (uint16_t)(toc[pageIndex]);

        if (entryIndex >= s_info->entry_count) {
            printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
            return KERN_FAILURE;
        }
    }
    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v2(vm_shared_region_slide_info_entry_v2_t s_info, mach_vm_size_t slide_info_size)
{
    if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
        return KERN_FAILURE;
    }

    /* Ensure that the slide info doesn't reference any data outside of its bounds. */

    uint32_t page_starts_count = s_info->page_starts_count;
    uint32_t page_extras_count = s_info->page_extras_count;
    mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
    if (num_trailing_entries < page_starts_count) {
        return KERN_FAILURE;
    }

    /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
    mach_vm_size_t trailing_size = num_trailing_entries << 1;
    if (trailing_size >> 1 != num_trailing_entries) {
        return KERN_FAILURE;
    }

    mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
    if (required_size < sizeof(*s_info)) {
        return KERN_FAILURE;
    }

    if (required_size > slide_info_size) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
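/*
 * Example of the v2 bounds check above: with page_starts_count = 3 and
 * page_extras_count = 2 there are 5 trailing uint16_t entries, so
 * trailing_size = 10 bytes and required_size = sizeof(*s_info) + 10,
 * which must not exceed the slide_info_size supplied by user space.
 */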
static kern_return_t
vm_shared_region_slide_sanity_check_v3(vm_shared_region_slide_info_entry_v3_t s_info, mach_vm_size_t slide_info_size)
{
    if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
        printf("vm_shared_region_slide_sanity_check_v3: s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE 0x%llx != 0x%llx\n", (uint64_t)s_info->page_size, (uint64_t)PAGE_SIZE_FOR_SR_SLIDE);
        return KERN_FAILURE;
    }

    uint32_t page_starts_count = s_info->page_starts_count;
    mach_vm_size_t num_trailing_entries = page_starts_count;
    mach_vm_size_t trailing_size = num_trailing_entries << 1;
    mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
    if (required_size < sizeof(*s_info)) {
        printf("vm_shared_region_slide_sanity_check_v3: required_size != sizeof(*s_info) 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)sizeof(*s_info));
        return KERN_FAILURE;
    }

    if (required_size > slide_info_size) {
        printf("vm_shared_region_slide_sanity_check_v3: required_size != slide_info_size 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)slide_info_size);
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v4(vm_shared_region_slide_info_entry_v4_t s_info, mach_vm_size_t slide_info_size)
{
    if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
        return KERN_FAILURE;
    }

    /* Ensure that the slide info doesn't reference any data outside of its bounds. */

    uint32_t page_starts_count = s_info->page_starts_count;
    uint32_t page_extras_count = s_info->page_extras_count;
    mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
    if (num_trailing_entries < page_starts_count) {
        return KERN_FAILURE;
    }

    /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
    mach_vm_size_t trailing_size = num_trailing_entries << 1;
    if (trailing_size >> 1 != num_trailing_entries) {
        return KERN_FAILURE;
    }

    mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
    if (required_size < sizeof(*s_info)) {
        return KERN_FAILURE;
    }

    if (required_size > slide_info_size) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
kern_return_t
vm_shared_region_slide_sanity_check(vm_shared_region_t sr)
{
    vm_shared_region_slide_info_t si;
    vm_shared_region_slide_info_entry_t s_info;
    kern_return_t kr;

    si = vm_shared_region_get_slide_info(sr);
    s_info = si->slide_info_entry;

    kr = mach_vm_protect(kernel_map,
        (mach_vm_offset_t)(vm_offset_t)s_info,
        (mach_vm_size_t) si->slide_info_size,
        TRUE, VM_PROT_READ);
    if (kr != KERN_SUCCESS) {
        panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr);
    }

    if (s_info->version == 1) {
        kr = vm_shared_region_slide_sanity_check_v1(&s_info->v1);
    } else if (s_info->version == 2) {
        kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, si->slide_info_size);
    } else if (s_info->version == 3) {
        kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, si->slide_info_size);
    } else if (s_info->version == 4) {
        kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, si->slide_info_size);
    } else {
        goto fail;
    }

    if (kr != KERN_SUCCESS) {
        goto fail;
    }

    return KERN_SUCCESS;
fail:
    if (si->slide_info_entry != NULL) {
        kmem_free(kernel_map,
            (vm_offset_t) si->slide_info_entry,
            (vm_size_t) si->slide_info_size);

        vm_object_deallocate(si->slide_object);
        si->slide_object = NULL;
        si->slide_info_entry = NULL;
        si->slide_info_size = 0;
    }
    return KERN_FAILURE;
}
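/*
 * v1 slide info is a table of contents indexed by page, where each TOC entry
 * selects a per-page bitmap: bit j of bitmap byte i set means the 32-bit word
 * at byte offset 4 * (i * 8 + j) within the page gets the slide added to it
 * (with a carry check for the low half of 64-bit pointers).
 */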
static kern_return_t
vm_shared_region_slide_page_v1(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
    uint16_t *toc = NULL;
    slide_info_entry_toc_t bitmap = NULL;
    uint32_t i = 0, j = 0;
    uint8_t b = 0;
    uint32_t slide = si->slide;
    int is_64 = task_has_64Bit_addr(current_task());

    vm_shared_region_slide_info_entry_v1_t s_info = &si->slide_info_entry->v1;
    toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);

    if (pageIndex >= s_info->toc_count) {
        printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count);
    } else {
        uint16_t entryIndex = (uint16_t)(toc[pageIndex]);
        slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset);

        if (entryIndex >= s_info->entry_count) {
            printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count);
        } else {
            bitmap = &slide_info_entries[entryIndex];

            for (i = 0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) {
                b = bitmap->entry[i];
                if (b != 0) {
                    for (j = 0; j < 8; ++j) {
                        if (b & (1 << j)) {
                            uint32_t *ptr_to_slide;
                            uint32_t old_value;

                            ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr) + (sizeof(uint32_t) * (i * 8 + j)));
                            old_value = *ptr_to_slide;
                            *ptr_to_slide += slide;
                            if (is_64 && *ptr_to_slide < old_value) {
                                /*
                                 * We just slid the low 32 bits of a 64-bit pointer
                                 * and it looks like there should have been a carry-over
                                 * to the upper 32 bits.
                                 * The sliding failed...
                                 */
                                printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n",
                                    i, j, b, slide, old_value, *ptr_to_slide);
                                return KERN_FAILURE;
                            }
                        }
                    }
                }
            }
        }
    }

    return KERN_SUCCESS;
}
static kern_return_t
rebase_chain_32(
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

    const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
    const uint32_t value_mask = ~delta_mask;
    const uint32_t value_add = (uint32_t)(s_info->value_add);
    const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

    uint32_t page_offset = start_offset;
    uint32_t delta = 1;

    while (delta != 0 && page_offset <= last_page_offset) {
        uint8_t *loc;
        uint32_t value;

        loc = page_content + page_offset;
        memcpy(&value, loc, sizeof(value));
        delta = (value & delta_mask) >> delta_shift;
        value &= value_mask;

        if (value != 0) {
            value += value_add;
            value += slide_amount;
        }
        memcpy(loc, &value, sizeof(value));
        page_offset += delta;
    }

    /* If the offset went past the end of the page, then the slide data is invalid. */
    if (page_offset > last_page_offset) {
        return KERN_FAILURE;
    }
    return KERN_SUCCESS;
}
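/*
 * Worked example of one step of the chain walk above, using a hypothetical
 * delta_mask of 0x3FFF0000 (so delta_shift = __builtin_ctzll(0x3FFF0000) - 2
 * = 14).  A raw word of 0x00021000 then yields delta = 8, i.e. the next
 * pointer to rebase is 8 bytes further into the page, and an unslid target
 * of 0x1000 to which value_add and slide_amount are applied.  Illustrative
 * sketch only, kept out of the build:
 */
#if 0
static void
rebase_chain_32_example(void)
{
    const uint32_t delta_mask = 0x3FFF0000;                        /* hypothetical mask */
    const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;  /* 14 */
    uint32_t value = 0x00021000;                      /* raw word read from the page */
    uint32_t delta = (value & delta_mask) >> delta_shift;          /* 8 bytes */
    uint32_t target = value & ~delta_mask;            /* 0x1000, before rebasing */

    (void)delta;
    (void)target;
}
#endif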
static kern_return_t
rebase_chain_64(
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint64_t);

    const uint64_t delta_mask = s_info->delta_mask;
    const uint64_t value_mask = ~delta_mask;
    const uint64_t value_add = s_info->value_add;
    const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;

    uint32_t page_offset = start_offset;
    uint32_t delta = 1;

    while (delta != 0 && page_offset <= last_page_offset) {
        uint8_t *loc;
        uint64_t value;

        loc = page_content + page_offset;
        memcpy(&value, loc, sizeof(value));
        delta = (uint32_t)((value & delta_mask) >> delta_shift);
        value &= value_mask;

        if (value != 0) {
            value += value_add;
            value += slide_amount;
        }
        memcpy(loc, &value, sizeof(value));
        page_offset += delta;
    }

    if (page_offset + sizeof(uint32_t) == PAGE_SIZE_FOR_SR_SLIDE) {
        /* If a pointer straddling the page boundary needs to be adjusted, then
         * add the slide to the lower half. The encoding guarantees that the upper
         * half on the next page will need no masking.
         *
         * This assumes a little-endian machine and that the region being slid
         * never crosses a 4 GB boundary. */
        uint8_t *loc = page_content + page_offset;
        uint32_t value;

        memcpy(&value, loc, sizeof(value));
        value += slide_amount;
        memcpy(loc, &value, sizeof(value));
    } else if (page_offset > last_page_offset) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
static kern_return_t
rebase_chain(
    boolean_t is_64,
    uint32_t pageIndex,
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    kern_return_t kr;
    if (is_64) {
        kr = rebase_chain_64(page_content, start_offset, slide_amount, s_info);
    } else {
        kr = rebase_chain_32(page_content, start_offset, slide_amount, s_info);
    }

    if (kr != KERN_SUCCESS) {
        printf("vm_shared_region_slide_page() offset overflow: pageIndex=%u, start_offset=%u, slide_amount=%u\n",
            pageIndex, start_offset, slide_amount);
    }
    return kr;
}
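/*
 * In the v2 format a page_starts[] entry either encodes the first rebase
 * offset for the page directly, or, when DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA is
 * set, indexes into page_extras[], which lists several chain starts for that
 * page and is terminated by DYLD_CACHE_SLIDE_PAGE_ATTR_END.
 */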
static kern_return_t
vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
    vm_shared_region_slide_info_entry_v2_t s_info = &si->slide_info_entry->v2;
    const uint32_t slide_amount = si->slide;

    /* The high bits of the delta_mask field are nonzero precisely when the shared
     * cache is 64-bit. */
    const boolean_t is_64 = (s_info->delta_mask >> 32) != 0;

    const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
    const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

    uint8_t *page_content = (uint8_t *)vaddr;
    uint16_t page_entry;

    if (pageIndex >= s_info->page_starts_count) {
        printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
            pageIndex, s_info->page_starts_count);
        return KERN_FAILURE;
    }
    page_entry = page_starts[pageIndex];

    if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
        return KERN_SUCCESS;
    }

    if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) {
        uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE;
        uint16_t info;

        do {
            uint16_t page_start_offset;
            kern_return_t kr;

            if (chain_index >= s_info->page_extras_count) {
                printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
                    chain_index, s_info->page_extras_count);
                return KERN_FAILURE;
            }
            info = page_extras[chain_index];
            page_start_offset = (info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;

            kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
            if (kr != KERN_SUCCESS) {
                return KERN_FAILURE;
            }

            chain_index++;
        } while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END));
    } else {
        const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
        kern_return_t kr;

        kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
        if (kr != KERN_SUCCESS) {
            return KERN_FAILURE;
        }
    }

    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vaddr, __unused mach_vm_offset_t uservaddr, uint32_t pageIndex)
{
    vm_shared_region_slide_info_entry_v3_t s_info = &si->slide_info_entry->v3;
    const uint32_t slide_amount = si->slide;

    uint8_t *page_content = (uint8_t *)vaddr;
    uint16_t page_entry;

    if (pageIndex >= s_info->page_starts_count) {
        printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
            pageIndex, s_info->page_starts_count);
        return KERN_FAILURE;
    }
    page_entry = s_info->page_starts[pageIndex];

    if (page_entry == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE) {
        return KERN_SUCCESS;
    }

    uint8_t* rebaseLocation = page_content;
    uint64_t delta = page_entry;
    do {
        rebaseLocation += delta;
        uint64_t value;
        memcpy(&value, rebaseLocation, sizeof(value));
        delta = ((value & 0x3FF8000000000000) >> 51) * sizeof(uint64_t);

        // A pointer is one of :
        // {
        //     uint64_t pointerValue : 51;
        //     uint64_t offsetToNextPointer : 11;
        //     uint64_t isBind : 1 = 0;
        //     uint64_t authenticated : 1 = 0;
        // }
        // {
        //     uint32_t offsetFromSharedCacheBase;
        //     uint16_t diversityData;
        //     uint16_t hasAddressDiversity : 1;
        //     uint16_t hasDKey : 1;
        //     uint16_t hasBKey : 1;
        //     uint16_t offsetToNextPointer : 11;
        //     uint16_t isBind : 1;
        //     uint16_t authenticated : 1 = 1;
        // }

        bool isBind = (value & (1ULL << 62)) != 0;
        if (isBind) {
            return KERN_FAILURE;
        }

#if defined(HAS_APPLE_PAC)
        uint16_t diversity_data = (uint16_t)(value >> 32);
        bool hasAddressDiversity = (value & (1ULL << 48)) != 0;
        ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
#endif /* HAS_APPLE_PAC */
        bool isAuthenticated = (value & (1ULL << 63)) != 0;

        if (isAuthenticated) {
            // The new value for a rebase is the low 32-bits of the threaded value plus the slide.
            value = (value & 0xFFFFFFFF) + slide_amount;
            // Add in the offset from the mach_header
            const uint64_t value_add = s_info->value_add;
            value += value_add;

#if defined(HAS_APPLE_PAC)
            uint64_t discriminator = diversity_data;
            if (hasAddressDiversity) {
                // First calculate a new discriminator using the address of where we are trying to store the value
                uintptr_t pageOffset = rebaseLocation - page_content;
                discriminator = __builtin_ptrauth_blend_discriminator((void*)(((uintptr_t)uservaddr) + pageOffset), discriminator);
            }

            if (si->si_ptrauth &&
                !(BootArgs->bootFlags & kBootFlagsDisableUserJOP)) {
                /*
                 * these pointers are used in user mode. disable the kernel key diversification
                 * so we can sign them for use in user mode.
                 */
                value = (uintptr_t)pmap_sign_user_ptr((void *)value, key, discriminator);
            }
#endif /* HAS_APPLE_PAC */
        } else {
            // The new value for a rebase is the low 51-bits of the threaded value plus the slide.
            // Regular pointer which needs to fit in 51-bits of value.
            // C++ RTTI uses the top bit, so we'll allow the whole top-byte
            // and the bottom 43-bits to be fit in to 51-bits.
            uint64_t top8Bits = value & 0x0007F80000000000ULL;
            uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;
            uint64_t targetValue = (top8Bits << 13) | bottom43Bits;
            value = targetValue + slide_amount;
        }

        memcpy(rebaseLocation, &value, sizeof(value));
    } while (delta != 0);

    return KERN_SUCCESS;
}
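/*
 * Sketch of the plain (non-authenticated) v3 rebase above: the 51-bit
 * pointerValue keeps the target's top byte (bits 43..50 of the raw value)
 * next to its low 43 bits, so shifting the top byte left by 13 restores it
 * to bits 56..63 before the slide is added.  Illustrative helper only, kept
 * out of the build:
 */
#if 0
static uint64_t
slide_v3_plain_rebase_example(uint64_t value, uint32_t slide_amount)
{
    uint64_t top8Bits = value & 0x0007F80000000000ULL;      /* bits 43..50 */
    uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;  /* bits 0..42 */
    uint64_t targetValue = (top8Bits << 13) | bottom43Bits;
    return targetValue + slide_amount;
}
#endif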
static kern_return_t
rebase_chainv4(
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v4_t s_info)
{
    const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

    const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
    const uint32_t value_mask = ~delta_mask;
    const uint32_t value_add = (uint32_t)(s_info->value_add);
    const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

    uint32_t page_offset = start_offset;
    uint32_t delta = 1;

    while (delta != 0 && page_offset <= last_page_offset) {
        uint8_t *loc;
        uint32_t value;

        loc = page_content + page_offset;
        memcpy(&value, loc, sizeof(value));
        delta = (value & delta_mask) >> delta_shift;
        value &= value_mask;

        if ((value & 0xFFFF8000) == 0) {
            // small positive non-pointer, use as-is
        } else if ((value & 0x3FFF8000) == 0x3FFF8000) {
            // small negative non-pointer
            value |= 0xC0000000;
        } else {
            // pointer that needs rebasing
            value += value_add;
            value += slide_amount;
        }
        memcpy(loc, &value, sizeof(value));
        page_offset += delta;
    }

    /* If the offset went past the end of the page, then the slide data is invalid. */
    if (page_offset > last_page_offset) {
        return KERN_FAILURE;
    }
    return KERN_SUCCESS;
}
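/*
 * Example of the v4 non-pointer cases above: a raw value of 0x3FFF8042
 * matches the 0x3FFF8000 pattern, so it is a small negative constant and is
 * restored by OR-ing in 0xC0000000, giving 0xFFFF8042.  Values below 0x8000
 * pass through unchanged; everything else is treated as a pointer and gets
 * value_add plus slide_amount applied.
 */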
static kern_return_t
vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
    vm_shared_region_slide_info_entry_v4_t s_info = &si->slide_info_entry->v4;
    const uint32_t slide_amount = si->slide;

    const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
    const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

    uint8_t *page_content = (uint8_t *)vaddr;
    uint16_t page_entry;

    if (pageIndex >= s_info->page_starts_count) {
        printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
            pageIndex, s_info->page_starts_count);
        return KERN_FAILURE;
    }
    page_entry = page_starts[pageIndex];

    if (page_entry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE) {
        return KERN_SUCCESS;
    }

    if (page_entry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) {
        uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE4_PAGE_INDEX;
        uint16_t info;

        do {
            uint16_t page_start_offset;
            kern_return_t kr;

            if (chain_index >= s_info->page_extras_count) {
                printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
                    chain_index, s_info->page_extras_count);
                return KERN_FAILURE;
            }
            info = page_extras[chain_index];
            page_start_offset = (info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;

            kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
            if (kr != KERN_SUCCESS) {
                return KERN_FAILURE;
            }

            chain_index++;
        } while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END));
    } else {
        const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
        kern_return_t kr;

        kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
        if (kr != KERN_SUCCESS) {
            return KERN_FAILURE;
        }
    }

    return KERN_SUCCESS;
}
kern_return_t
vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, mach_vm_offset_t uservaddr, uint32_t pageIndex)
{
    if (si->slide_info_entry->version == 1) {
        return vm_shared_region_slide_page_v1(si, vaddr, pageIndex);
    } else if (si->slide_info_entry->version == 2) {
        return vm_shared_region_slide_page_v2(si, vaddr, pageIndex);
    } else if (si->slide_info_entry->version == 3) {
        return vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex);
    } else if (si->slide_info_entry->version == 4) {
        return vm_shared_region_slide_page_v4(si, vaddr, pageIndex);
    } else {
        return KERN_FAILURE;
    }
}
/******************************************************************************/
/* Comm page support                                                          */
/******************************************************************************/

ipc_port_t commpage32_handle = IPC_PORT_NULL;
ipc_port_t commpage64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage32_entry = NULL;
vm_named_entry_t commpage64_entry = NULL;
vm_map_t commpage32_map = VM_MAP_NULL;
vm_map_t commpage64_map = VM_MAP_NULL;

ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage_text32_entry = NULL;
vm_named_entry_t commpage_text64_entry = NULL;
vm_map_t commpage_text32_map = VM_MAP_NULL;
vm_map_t commpage_text64_map = VM_MAP_NULL;

user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START;
user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START;
#if defined(__i386__) || defined(__x86_64__)
/*
 * Create a memory entry, VM submap and pmap for one commpage.
 */
static void
_vm_commpage_init(
    ipc_port_t *handlep,
    vm_map_size_t size)
{
    kern_return_t kr;
    vm_named_entry_t mem_entry;
    vm_map_t new_map;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> _init(0x%llx)\n",
        (long long)size));

    kr = mach_memory_entry_allocate(&mem_entry,
        handlep);
    if (kr != KERN_SUCCESS) {
        panic("_vm_commpage_init: could not allocate mem_entry");
    }
    new_map = vm_map_create(pmap_create_options(NULL, 0, 0), 0, size, PMAP_CREATE_64BIT);
    if (new_map == VM_MAP_NULL) {
        panic("_vm_commpage_init: could not allocate VM map");
    }
    mem_entry->backing.map = new_map;
    mem_entry->internal = TRUE;
    mem_entry->is_sub_map = TRUE;
    mem_entry->offset = 0;
    mem_entry->protection = VM_PROT_ALL;
    mem_entry->size = size;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: _init(0x%llx) <- %p\n",
        (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep)));
}
#endif /* __i386__ || __x86_64__ */
/*
 * Initialize the comm text pages at boot time
 */
extern u_int32_t random(void);
void
vm_commpage_text_init(void)
{
    SHARED_REGION_TRACE_DEBUG(
        ("commpage text: ->init()\n"));
#if defined(__i386__) || defined(__x86_64__)
    /* create the 32 bit comm text page */
    unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restricting to 32bMAX-2PAGE */
    _vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
    commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject;
    commpage_text32_map = commpage_text32_entry->backing.map;
    commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
    /* XXX if (cpu_is_64bit_capable()) ? */
    /* create the 64-bit comm page */
    offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding up to 2Mb range */
    _vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
    commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject;
    commpage_text64_map = commpage_text64_entry->backing.map;
    commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);

    commpage_text_populate();
#elif defined(__arm64__) || defined(__arm__)
#else
#error Unknown architecture.
#endif /* __i386__ || __x86_64__ */
    /* populate the routines in here */
    SHARED_REGION_TRACE_DEBUG(
        ("commpage text: init() <-\n"));
}
/*
 * Initialize the comm pages at boot time.
 */
void
vm_commpage_init(void)
{
    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> init()\n"));

#if defined(__i386__) || defined(__x86_64__)
    /* create the 32-bit comm page */
    _vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
    commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
    commpage32_map = commpage32_entry->backing.map;

    /* XXX if (cpu_is_64bit_capable()) ? */
    /* create the 64-bit comm page */
    _vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
    commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
    commpage64_map = commpage64_entry->backing.map;

#endif /* __i386__ || __x86_64__ */

    /* populate them according to this specific platform */
    commpage_populate();
    __commpage_setup = 1;
#if !CONFIG_EMBEDDED
    if (__system_power_source == 0) {
        post_sys_powersource_internal(0, 1);
    }
#endif

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: init() <-\n"));
}
/*
 * Enter the appropriate comm page into the task's address space.
 * This is called at exec() time via vm_map_exec().
 */
kern_return_t
vm_commpage_enter(
    vm_map_t map,
    task_t task,
    boolean_t is64bit)
{
#if defined(__arm__)
#pragma unused(is64bit)
    (void)task;
    (void)map;
    return KERN_SUCCESS;
#elif defined(__arm64__)
#pragma unused(is64bit)
    (void)task;
    pmap_insert_sharedpage(vm_map_pmap(map));
    return KERN_SUCCESS;
#else
    ipc_port_t commpage_handle, commpage_text_handle;
    vm_map_offset_t commpage_address, objc_address, commpage_text_address;
    vm_map_size_t commpage_size, objc_size, commpage_text_size;
    int vm_flags;
    vm_map_kernel_flags_t vmk_flags;
    kern_return_t kr;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> enter(%p,%p)\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task)));

    commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
    /* the comm page is likely to be beyond the actual end of the VM map */
    vm_flags = VM_FLAGS_FIXED;
    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    vmk_flags.vmkf_beyond_max = TRUE;

    /* select the appropriate comm page for this task */
    assert(!(is64bit ^ vm_map_is_64bit(map)));
    if (is64bit) {
        commpage_handle = commpage64_handle;
        commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
        commpage_size = _COMM_PAGE64_AREA_LENGTH;
        objc_size = _COMM_PAGE64_OBJC_SIZE;
        objc_address = _COMM_PAGE64_OBJC_BASE;
        commpage_text_handle = commpage_text64_handle;
        commpage_text_address = (vm_map_offset_t) commpage_text64_location;
    } else {
        commpage_handle = commpage32_handle;
        commpage_address =
            (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
        commpage_size = _COMM_PAGE32_AREA_LENGTH;
        objc_size = _COMM_PAGE32_OBJC_SIZE;
        objc_address = _COMM_PAGE32_OBJC_BASE;
        commpage_text_handle = commpage_text32_handle;
        commpage_text_address = (vm_map_offset_t) commpage_text32_location;
    }

    vm_tag_t tag = VM_KERN_MEMORY_NONE;
    if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
        (commpage_size & (pmap_nesting_size_min - 1)) == 0) {
        /* the commpage is properly aligned or sized for pmap-nesting */
        tag = VM_MEMORY_SHARED_PMAP;
        vmk_flags.vmkf_nested_pmap = TRUE;
    }

    /* map the comm page in the task's address space */
    assert(commpage_handle != IPC_PORT_NULL);
    kr = vm_map_enter_mem_object(
        map,
        &commpage_address,
        commpage_size,
        0,
        vm_flags,
        vmk_flags,
        tag,
        commpage_handle,
        0,
        FALSE,
        VM_PROT_READ,
        VM_PROT_READ,
        VM_INHERIT_SHARE);
    if (kr != KERN_SUCCESS) {
        SHARED_REGION_TRACE_ERROR(
            ("commpage: enter(%p,0x%llx,0x%llx) "
            "commpage %p mapping failed 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (long long)commpage_address,
            (long long)commpage_size,
            (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr));
    }

    /* map the comm text page in the task's address space */
    assert(commpage_text_handle != IPC_PORT_NULL);
    kr = vm_map_enter_mem_object(
        map,
        &commpage_text_address,
        commpage_text_size,
        0,
        vm_flags,
        vmk_flags,
        tag,
        commpage_text_handle,
        0,
        FALSE,
        VM_PROT_READ | VM_PROT_EXECUTE,
        VM_PROT_READ | VM_PROT_EXECUTE,
        VM_INHERIT_SHARE);
    if (kr != KERN_SUCCESS) {
        SHARED_REGION_TRACE_ERROR(
            ("commpage text: enter(%p,0x%llx,0x%llx) "
            "commpage text %p mapping failed 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (long long)commpage_text_address,
            (long long)commpage_text_size,
            (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr));
    }

    /*
     * Since we're here, we also pre-allocate some virtual space for the
     * Objective-C run-time, if needed...
     */
    if (objc_size != 0) {
        kr = vm_map_enter_mem_object(
            map,
            &objc_address,
            objc_size,
            0,
            VM_FLAGS_FIXED,
            vmk_flags,
            tag,
            IPC_PORT_NULL,
            0,
            FALSE,
            VM_PROT_ALL,
            VM_PROT_ALL,
            VM_INHERIT_DEFAULT);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("commpage: enter(%p,0x%llx,0x%llx) "
                "objc mapping failed 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (long long)objc_address,
                (long long)objc_size, kr));
        }
    }

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: enter(%p,%p) <- 0x%x\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task), kr));
    return kr;
#endif
}
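/*
 * Example of the pmap-nesting check in vm_commpage_enter() above: the commpage
 * base address and size must both be multiples of pmap_nesting_size_min
 * (e.g. 4 MiB-aligned and 4 MiB-sized if that minimum were 4 MiB) before the
 * mapping is tagged VM_MEMORY_SHARED_PMAP and entered with vmkf_nested_pmap set.
 */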
kern_return_t
vm_shared_region_slide(uint32_t slide,
    mach_vm_offset_t entry_start_address,
    mach_vm_size_t entry_size,
    mach_vm_offset_t slide_start,
    mach_vm_size_t slide_size,
    mach_vm_offset_t slid_mapping,
    memory_object_control_t sr_file_control)
{
    void *slide_info_entry = NULL;
    int error;
    vm_shared_region_t sr;

    SHARED_REGION_TRACE_DEBUG(
        ("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
        slide, entry_start_address, entry_size, slide_start, slide_size));

    sr = vm_shared_region_get(current_task());
    if (sr == NULL) {
        printf("%s: no shared region?\n", __FUNCTION__);
        SHARED_REGION_TRACE_DEBUG(
            ("vm_shared_region_slide: <- %d (no shared region)\n",
            KERN_FAILURE));
        return KERN_FAILURE;
    }

    /*
     * Protect from concurrent access.
     */
    vm_shared_region_lock();
    while (sr->sr_slide_in_progress) {
        vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
    }
    if (sr->sr_slid
#ifndef CONFIG_EMBEDDED
        || shared_region_completed_slide
#endif
        ) {
        vm_shared_region_unlock();

        vm_shared_region_deallocate(sr);
        printf("%s: shared region already slid?\n", __FUNCTION__);
        SHARED_REGION_TRACE_DEBUG(
            ("vm_shared_region_slide: <- %d (already slid)\n",
            KERN_FAILURE));
        return KERN_FAILURE;
    }

    sr->sr_slide_in_progress = TRUE;
    vm_shared_region_unlock();

    error = vm_shared_region_slide_mapping(sr,
        slide_size,
        entry_start_address,
        entry_size,
        slid_mapping,
        slide,
        sr_file_control);
    if (error) {
        printf("slide_info initialization failed with kr=%d\n", error);
        goto done;
    }

    slide_info_entry = vm_shared_region_get_slide_info_entry(sr);
    if (slide_info_entry == NULL) {
        error = KERN_FAILURE;
    } else {
        error = copyin((user_addr_t)slide_start,
            slide_info_entry,
            (vm_size_t)slide_size);
        if (error) {
            error = KERN_INVALID_ADDRESS;
        }
    }
    if (error) {
        goto done;
    }

    if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) {
        error = KERN_INVALID_ARGUMENT;
        printf("Sanity Check failed for slide_info\n");
    } else {
#if DEBUG
        printf("Successfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n",
            (void*)(uintptr_t)entry_start_address,
            (unsigned long)entry_size,
            (unsigned long)slide_size);
#endif
    }
done:
    vm_shared_region_lock();

    assert(sr->sr_slide_in_progress);
    assert(sr->sr_slid == FALSE);
    sr->sr_slide_in_progress = FALSE;
    thread_wakeup(&sr->sr_slide_in_progress);

    if (error == KERN_SUCCESS) {
        sr->sr_slid = TRUE;

        /*
         * We don't know how to tear down a slid shared region today, because
         * we would have to invalidate all the pages that have been slid
         * atomically with respect to anyone mapping the shared region afresh.
         * Therefore, take a dangling reference to prevent teardown.
         */
        sr->sr_ref_count++;
#ifndef CONFIG_EMBEDDED
        shared_region_completed_slide = TRUE;
#endif
    }
    vm_shared_region_unlock();

    vm_shared_region_deallocate(sr);

    SHARED_REGION_TRACE_DEBUG(
        ("vm_shared_region_slide: <- %d\n",
        error));

    return error;
}
/*
 * This is called from power management code to let the kernel know the
 * current source of power:
 * 0 if it is an external source (connected to power)
 * 1 if it is an internal power source, i.e. battery
 */
void
#if !CONFIG_EMBEDDED
post_sys_powersource(int i)
#else
post_sys_powersource(__unused int i)
#endif
{
#if !CONFIG_EMBEDDED
    post_sys_powersource_internal(i, 0);
#endif
}


#if !CONFIG_EMBEDDED
static void
post_sys_powersource_internal(int i, int internal)
{
    if (internal == 0) {
        __system_power_source = i;
    }

    if (__commpage_setup != 0) {
        if (__system_power_source != 0) {
            commpage_set_spin_count(0);
        } else {
            commpage_set_spin_count(MP_SPIN_TRIES);
        }
    }
}
#endif /* !CONFIG_EMBEDDED */