/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Shared region (... and comm page)
 *
 * This file handles the VM shared region and comm page.
 *
 * SHARED REGION:
 *
 * A shared region is a submap that contains the most common system shared
 * libraries for a given environment which is defined by:
 * - cpu-type
 * - 64-bitness
 * - root directory
 * - Team ID - when we have pointer authentication.
 *
 * The point of a shared region is to reduce the setup overhead when exec'ing
 * a new process. A shared region uses a shared VM submap that gets mapped
 * automatically at exec() time, see vm_map_exec(). The first process of a given
 * environment sets up the shared region and all further processes in that
 * environment can re-use that shared region without having to re-create
 * the same mappings in their VM map. All they need is contained in the shared
 * region.
 *
 * The region can also share a pmap (mostly for read-only parts but also for the
 * initial version of some writable parts), which gets "nested" into the
 * process's pmap. This reduces the number of soft faults: once one process
 * brings in a page in the shared region, all the other processes can access
 * it without having to enter it in their own pmap.
 *
 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
 * to map the appropriate shared region in the process's address space.
 * We look up the appropriate shared region for the process's environment.
 * If we can't find one, we create a new (empty) one and add it to the list.
 * Otherwise, we just take an extra reference on the shared region we found.
 *
 * The "dyld" runtime, mapped into the process's address space at exec() time,
 * will then use the shared_region_check_np() and shared_region_map_and_slide_np()
 * system calls to validate and/or populate the shared region with the
 * appropriate dyld_shared_cache file.
 *
 * The shared region is inherited on fork() and the child simply takes an
 * extra reference on its parent's shared region.
 *
 * When the task terminates, we release the reference on its shared region.
 * When the last reference is released, we destroy the shared region.
 *
 * After a chroot(), the calling process keeps using its original shared region,
 * since that's what was mapped when it was started. But its children
 * will use a different shared region, because they need to use the shared
 * cache that's relative to the new root directory.
 *
 * COMM PAGE:
 *
 * A "comm page" is an area of memory that is populated by the kernel with
 * the appropriate platform-specific version of some commonly used code.
 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
 * for the native cpu-type. No need to overly optimize translated code
 * for hardware that is not really there!
 *
 * The comm pages are created and populated at boot time.
 *
 * The appropriate comm page is mapped into a process's address space
 * at exec() time, in vm_map_exec(). It is then inherited on fork().
 *
 * The comm page is shared between the kernel and all applications of
 * a given platform. Only the kernel can modify it.
 *
 * Applications just branch to fixed addresses in the comm page and find
 * the right version of the code for the platform. There is also some
 * data provided and updated by the kernel for processes to retrieve easily
 * without having to do a system call.
 */
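/*
 * Illustrative sketch only (not built as part of this file): a user-space
 * process can read data the kernel publishes in the comm page by loading from
 * the fixed addresses exported by <machine/cpu_capabilities.h>. The macro name
 * below is an assumption based on that header; most programs go through
 * libSystem wrappers instead of touching these addresses directly.
 */
#if 0 /* user-space example, never compiled here */
#include <stdint.h>
#include <stdio.h>
#include <machine/cpu_capabilities.h>

int
main(void)
{
    /* the comm page is already mapped at a fixed address by exec() */
    uint64_t caps = *(volatile uint64_t *)_COMM_PAGE_CPU_CAPABILITIES64;
    printf("cpu capabilities: 0x%llx\n", (unsigned long long)caps);
    return 0;
}
#endif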
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>

#include <vm/vm_protos.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>
#include <sys/random.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#endif /* defined(__arm__) || defined(__arm64__) */
/*
 * The following codes are used in the subclass
 * of the DBG_MACH_SHAREDREGION class.
 */
#define PROCESS_SHARED_CACHE_LAYOUT 0x00

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif /* __has_feature(ptrauth_calls) */
/* "dyld" uses this to figure out what the kernel supports */
int shared_region_version = 3;

/* trace level, output is sent to the system log file */
int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;

/* should local (non-chroot) shared regions persist when no task uses them ? */
int shared_region_persistence = 0;      /* no by default */

/* delay in seconds before reclaiming an unused shared region */
TUNABLE_WRITEABLE(int, shared_region_destroy_delay, "vm_shared_region_destroy_delay", 120);

/*
 * Cached pointer to the most recently mapped shared region from PID 1, which should
 * be the most commonly mapped shared region in the system. There are many processes
 * which do not use this, for a variety of reasons.
 *
 * The main consumer of this is stackshot.
 */
struct vm_shared_region *primary_system_shared_region = NULL;
#if XNU_TARGET_OS_OSX
/*
 * Only one cache gets to slide on Desktop, since we can't
 * tear down slide info properly today and the desktop actually
 * produces lots of shared caches.
 */
boolean_t shared_region_completed_slide = FALSE;
#endif /* XNU_TARGET_OS_OSX */
/* this lock protects all the shared region data structures */
static LCK_GRP_DECLARE(vm_shared_region_lck_grp, "vm shared region");
static LCK_MTX_DECLARE(vm_shared_region_lock, &vm_shared_region_lck_grp);

#define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
#define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
#define vm_shared_region_sleep(event, interruptible)            \
    lck_mtx_sleep(&vm_shared_region_lock,                       \
        LCK_SLEEP_DEFAULT,                                      \
        (event_t) (event),                                      \
        (interruptible))
/* the list of currently available shared regions (one per environment) */
queue_head_t vm_shared_region_queue = QUEUE_HEAD_INITIALIZER(vm_shared_region_queue);
int vm_shared_region_count = 0;
int vm_shared_region_peak = 0;
/*
 * The number of times an event has forced the recalculation of the reslide
 * shared region slide.
 */
#if __has_feature(ptrauth_calls)
int vm_shared_region_reslide_count = 0;
#endif /* __has_feature(ptrauth_calls) */
static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
static vm_shared_region_t vm_shared_region_create(
    void *root_dir,
    cpu_type_t cputype,
    cpu_subtype_t cpu_subtype,
    boolean_t is_64bit,
    boolean_t reslide);
static void vm_shared_region_destroy(vm_shared_region_t shared_region);

static kern_return_t vm_shared_region_slide_sanity_check(
    vm_shared_region_slide_info_entry_t entry,
    mach_vm_size_t size);
static void vm_shared_region_timeout(thread_call_param_t param0,
    thread_call_param_t param1);
static kern_return_t vm_shared_region_slide_mapping(
    vm_shared_region_t sr,
    user_addr_t slide_info_addr,
    mach_vm_size_t slide_info_size,
    mach_vm_offset_t start,
    mach_vm_size_t size,
    mach_vm_offset_t slid_mapping,
    uint32_t slide,
    memory_object_control_t,
    vm_prot_t prot); /* forward */
static int __commpage_setup = 0;
#if XNU_TARGET_OS_OSX
static int __system_power_source = 1;   /* init to external power source */
static void post_sys_powersource_internal(int i, int internal);
#endif /* XNU_TARGET_OS_OSX */

extern u_int32_t random(void);
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 */
vm_shared_region_t
vm_shared_region_get(
    task_t task)
{
    vm_shared_region_t shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> get(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(task)));

    task_lock(task);
    vm_shared_region_lock();
    shared_region = task->shared_region;
    if (shared_region) {
        assert(shared_region->sr_ref_count > 0);
        vm_shared_region_reference_locked(shared_region);
    }
    vm_shared_region_unlock();
    task_unlock(task);

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: get(%p) <- %p\n",
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(shared_region)));

    return shared_region;
}
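/*
 * Usage sketch (illustrative only, with a hypothetical caller): every
 * successful vm_shared_region_get() returns the region with an extra
 * reference that the caller must eventually drop with
 * vm_shared_region_deallocate().
 */
#if 0 /* example, never compiled here */
static void
example_inspect_shared_region(task_t task)
{
    vm_shared_region_t sr;

    sr = vm_shared_region_get(task);
    if (sr == NULL) {
        return; /* task has no shared region */
    }
    /* ... use "sr" while holding the extra reference ... */
    vm_shared_region_deallocate(sr); /* consume the extra reference */
}
#endif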
vm_map_t
vm_shared_region_vm_map(
    vm_shared_region_t shared_region)
{
    ipc_port_t sr_handle;
    vm_named_entry_t sr_mem_entry;
    vm_map_t sr_map;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> vm_map(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 0);

    sr_handle = shared_region->sr_mem_entry;
    sr_mem_entry = (vm_named_entry_t) ip_get_kobject(sr_handle);
    sr_map = sr_mem_entry->backing.map;
    assert(sr_mem_entry->is_sub_map);

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: vm_map(%p) <- %p\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        (void *)VM_KERNEL_ADDRPERM(sr_map)));
    return sr_map;
}
/*
 * Set the shared region the process should use.
 * A NULL new shared region means that we just want to release the old
 * shared region.
 * The caller should already have an extra reference on the new shared region
 * (if any). We release a reference on the old shared region (if any).
 */
void
vm_shared_region_set(
    task_t task,
    vm_shared_region_t new_shared_region)
{
    vm_shared_region_t old_shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> set(%p, %p)\n",
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(new_shared_region)));

    task_lock(task);
    vm_shared_region_lock();

    old_shared_region = task->shared_region;
    if (new_shared_region) {
        assert(new_shared_region->sr_ref_count > 0);
    }

    task->shared_region = new_shared_region;

    vm_shared_region_unlock();
    task_unlock(task);

    if (old_shared_region) {
        assert(old_shared_region->sr_ref_count > 0);
        vm_shared_region_deallocate(old_shared_region);
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: set(%p) <- old=%p new=%p\n",
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(old_shared_region),
        (void *)VM_KERNEL_ADDRPERM(new_shared_region)));
}
/*
 * Look up the shared region for the desired environment.
 * If none is found, create a new (empty) one.
 * Grab an extra reference on the returned shared region, to make sure
 * it doesn't get destroyed before the caller is done with it. The caller
 * is responsible for consuming that extra reference if necessary.
 */
vm_shared_region_t
vm_shared_region_lookup(
    void *root_dir,
    cpu_type_t cputype,
    cpu_subtype_t cpu_subtype,
    boolean_t is_64bit,
    boolean_t reslide)
{
    vm_shared_region_t shared_region;
    vm_shared_region_t new_shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> lookup(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d)\n",
        (void *)VM_KERNEL_ADDRPERM(root_dir),
        cputype, cpu_subtype, is_64bit, reslide));

    shared_region = NULL;
    new_shared_region = NULL;

    vm_shared_region_lock();
    for (;;) {
        queue_iterate(&vm_shared_region_queue,
            shared_region,
            vm_shared_region_t,
            sr_q) {
            assert(shared_region->sr_ref_count > 0);
            if (shared_region->sr_cpu_type == cputype &&
#if !__has_feature(ptrauth_calls) /* arm64e/arm64 use same region */
                shared_region->sr_cpu_subtype == cpu_subtype &&
#endif /* !__has_feature(ptrauth_calls) */
                shared_region->sr_root_dir == root_dir &&
                shared_region->sr_64bit == is_64bit &&
#if __has_feature(ptrauth_calls)
                shared_region->sr_reslide == reslide &&
#endif /* __has_feature(ptrauth_calls) */
                !shared_region->sr_stale) {
                /* found a match ! */
                vm_shared_region_reference_locked(shared_region);
                goto done;
            }
        }
        if (new_shared_region == NULL) {
            /* no match: create a new one */
            vm_shared_region_unlock();
            new_shared_region = vm_shared_region_create(root_dir,
                cputype,
                cpu_subtype,
                is_64bit,
                reslide);
            /* do the lookup again, in case we lost a race */
            vm_shared_region_lock();
        } else {
            /* still no match: use our new one */
            shared_region = new_shared_region;
            new_shared_region = NULL;
            queue_enter(&vm_shared_region_queue,
                shared_region,
                vm_shared_region_t,
                sr_q);
            vm_shared_region_count++;
            if (vm_shared_region_count > vm_shared_region_peak) {
                vm_shared_region_peak = vm_shared_region_count;
            }
            break;
        }
    }

done:
    vm_shared_region_unlock();

    if (new_shared_region) {
        /*
         * We lost a race with someone else to create a new shared
         * region for that environment. Get rid of our unused one.
         */
        assert(new_shared_region->sr_ref_count == 1);
        new_shared_region->sr_ref_count--;
        vm_shared_region_destroy(new_shared_region);
        new_shared_region = NULL;
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: lookup(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d) <- %p\n",
        (void *)VM_KERNEL_ADDRPERM(root_dir),
        cputype, cpu_subtype, is_64bit, reslide,
        (void *)VM_KERNEL_ADDRPERM(shared_region)));

    assert(shared_region->sr_ref_count > 0);
    return shared_region;
}
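/*
 * Usage sketch (illustrative only, hypothetical caller name): at exec() time
 * the environment tuple selects or creates the matching shared region, and the
 * extra reference returned by vm_shared_region_lookup() is handed to the task
 * via vm_shared_region_set(); it is eventually dropped through
 * vm_shared_region_deallocate() when the task terminates or switches regions.
 */
#if 0 /* example, never compiled here */
static void
example_pick_shared_region(void *fsroot, cpu_type_t cpu, cpu_subtype_t cpu_subtype,
    boolean_t is_64bit, boolean_t reslide, task_t task)
{
    vm_shared_region_t sr;

    /* returns with an extra reference held for us (or creates a new region) */
    sr = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit, reslide);
    /* hand that reference to the task; any old region's reference is released */
    vm_shared_region_set(task, sr);
}
#endif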
/*
 * Take an extra reference on a shared region.
 * The vm_shared_region_lock should already be held by the caller.
 */
static void
vm_shared_region_reference_locked(
    vm_shared_region_t shared_region)
{
    LCK_MTX_ASSERT(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> reference_locked(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    assert(shared_region->sr_ref_count > 0);
    shared_region->sr_ref_count++;
    assert(shared_region->sr_ref_count != 0);

    if (shared_region->sr_timer_call != NULL) {
        boolean_t cancelled;

        /* cancel and free any pending timeout */
        cancelled = thread_call_cancel(shared_region->sr_timer_call);
        if (cancelled) {
            thread_call_free(shared_region->sr_timer_call);
            shared_region->sr_timer_call = NULL;
            /* release the reference held by the cancelled timer */
            shared_region->sr_ref_count--;
        } else {
            /* the timer will drop the reference and free itself */
        }
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: reference_locked(%p) <- %d\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        shared_region->sr_ref_count));
}
/*
 * Take a reference on a shared region.
 */
void
vm_shared_region_reference(vm_shared_region_t shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> reference(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));

    vm_shared_region_lock();
    vm_shared_region_reference_locked(shared_region);
    vm_shared_region_unlock();

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: reference(%p) <- %d\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        shared_region->sr_ref_count));
}
/*
 * Release a reference on the shared region.
 * Destroy it if there are no references left.
 */
void
vm_shared_region_deallocate(
    vm_shared_region_t shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> deallocate(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));

    vm_shared_region_lock();

    assert(shared_region->sr_ref_count > 0);

    if (shared_region->sr_root_dir == NULL) {
        /*
         * Local (i.e. based on the boot volume) shared regions
         * can persist or not based on the "shared_region_persistence"
         * sysctl.
         * Make sure that this one complies.
         *
         * See comments in vm_shared_region_slide() for notes about
         * shared regions we have slid (which are not torn down currently).
         */
        if (shared_region_persistence &&
            !shared_region->sr_persists) {
            /* make this one persistent */
            shared_region->sr_ref_count++;
            shared_region->sr_persists = TRUE;
        } else if (!shared_region_persistence &&
            shared_region->sr_persists) {
            /* make this one no longer persistent */
            assert(shared_region->sr_ref_count > 1);
            shared_region->sr_ref_count--;
            shared_region->sr_persists = FALSE;
        }
    }

    assert(shared_region->sr_ref_count > 0);
    shared_region->sr_ref_count--;
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: deallocate(%p): ref now %d\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        shared_region->sr_ref_count));

    if (shared_region->sr_ref_count == 0) {
        uint64_t deadline;

        /*
         * Even though a shared region is unused, delay a while before
         * tearing it down, in case a new app launch can use it.
         */
        if (shared_region->sr_timer_call == NULL &&
            shared_region_destroy_delay != 0 &&
            !shared_region->sr_stale) {
            /* hold one reference for the timer */
            assert(!shared_region->sr_mapping_in_progress);
            shared_region->sr_ref_count++;

            /* set up the timer */
            shared_region->sr_timer_call = thread_call_allocate(
                (thread_call_func_t) vm_shared_region_timeout,
                (thread_call_param_t) shared_region);

            /* schedule the timer */
            clock_interval_to_deadline(shared_region_destroy_delay,
                NSEC_PER_SEC,
                &deadline);
            thread_call_enter_delayed(shared_region->sr_timer_call,
                deadline);

            SHARED_REGION_TRACE_DEBUG(
                ("shared_region: deallocate(%p): armed timer\n",
                (void *)VM_KERNEL_ADDRPERM(shared_region)));

            vm_shared_region_unlock();
        } else {
            /* timer expired: let go of this shared region */

            /* Make sure there's no cached pointer to the region. */
            if (primary_system_shared_region == shared_region) {
                primary_system_shared_region = NULL;
            }

            /*
             * Remove it from the queue first, so no one can find
             * it...
             */
            queue_remove(&vm_shared_region_queue,
                shared_region,
                vm_shared_region_t,
                sr_q);
            vm_shared_region_count--;
            vm_shared_region_unlock();

            /* ... and destroy it */
            vm_shared_region_destroy(shared_region);
            shared_region = NULL;
        }
    } else {
        vm_shared_region_unlock();
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: deallocate(%p) <-\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
}
static void
vm_shared_region_timeout(
    thread_call_param_t param0,
    __unused thread_call_param_t param1)
{
    vm_shared_region_t shared_region;

    shared_region = (vm_shared_region_t) param0;

    vm_shared_region_deallocate(shared_region);
}
/*
 * Create a new (empty) shared region for a new environment.
 */
static vm_shared_region_t
vm_shared_region_create(
    void *root_dir,
    cpu_type_t cputype,
    cpu_subtype_t cpu_subtype,
    boolean_t is_64bit,
#if !__has_feature(ptrauth_calls)
    __unused
#endif /* __has_feature(ptrauth_calls) */
    boolean_t reslide)
{
    kern_return_t kr;
    vm_named_entry_t mem_entry;
    ipc_port_t mem_entry_port;
    vm_shared_region_t shared_region;
    vm_map_t sub_map;
    mach_vm_offset_t base_address, pmap_nesting_start;
    mach_vm_size_t size, pmap_nesting_size;

    SHARED_REGION_TRACE_INFO(
        ("shared_region: -> create(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d)\n",
        (void *)VM_KERNEL_ADDRPERM(root_dir),
        cputype, cpu_subtype, is_64bit, reslide));

    base_address = 0;
    size = 0;
    pmap_nesting_start = 0;
    pmap_nesting_size = 0;
    mem_entry = NULL;
    mem_entry_port = IPC_PORT_NULL;
    sub_map = VM_MAP_NULL;

    /* create a new shared region structure... */
    shared_region = kalloc(sizeof(*shared_region));
    if (shared_region == NULL) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: create: couldn't allocate\n"));
        goto done;
    }

    /* figure out the correct settings for the desired environment */
    if (is_64bit) {
        switch (cputype) {
#if defined(__arm64__)
        case CPU_TYPE_ARM64:
            base_address = SHARED_REGION_BASE_ARM64;
            size = SHARED_REGION_SIZE_ARM64;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM64;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM64;
            break;
#elif !defined(__arm__)
        case CPU_TYPE_I386:
            base_address = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
            break;
        case CPU_TYPE_POWERPC:
            base_address = SHARED_REGION_BASE_PPC64;
            size = SHARED_REGION_SIZE_PPC64;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
            break;
#endif
        default:
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: create: unknown cpu type %d\n",
                cputype));
            kfree(shared_region, sizeof(*shared_region));
            shared_region = NULL;
            goto done;
        }
    } else {
        switch (cputype) {
#if defined(__arm__) || defined(__arm64__)
        case CPU_TYPE_ARM:
            base_address = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
            break;
#else
        case CPU_TYPE_I386:
            base_address = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
            break;
        case CPU_TYPE_POWERPC:
            base_address = SHARED_REGION_BASE_PPC;
            size = SHARED_REGION_SIZE_PPC;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
            break;
#endif
        default:
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: create: unknown cpu type %d\n",
                cputype));
            kfree(shared_region, sizeof(*shared_region));
            shared_region = NULL;
            goto done;
        }
    }

    /* create a memory entry structure and a Mach port handle */
    kr = mach_memory_entry_allocate(&mem_entry, &mem_entry_port);
    if (kr != KERN_SUCCESS) {
        kfree(shared_region, sizeof(*shared_region));
        shared_region = NULL;
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: create: "
            "couldn't allocate mem_entry\n"));
        goto done;
    }

#if defined(__arm__) || defined(__arm64__)
    {
        struct pmap *pmap_nested;
        unsigned int pmap_flags = 0;
        pmap_flags |= is_64bit ? PMAP_CREATE_64BIT : 0;

        pmap_nested = pmap_create_options(NULL, 0, pmap_flags);
        if (pmap_nested != PMAP_NULL) {
            pmap_set_nested(pmap_nested);
            sub_map = vm_map_create(pmap_nested, 0, (vm_map_offset_t)size, TRUE);
#if defined(__arm64__)
            if (is_64bit ||
                page_shift_user32 == SIXTEENK_PAGE_SHIFT) {
                /* enforce 16KB alignment of VM map entries */
                vm_map_set_page_shift(sub_map,
                    SIXTEENK_PAGE_SHIFT);
            }
#elif (__ARM_ARCH_7K__ >= 2)
            /* enforce 16KB alignment for watch targets with new ABI */
            vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT);
#endif /* __arm64__ */
        } else {
            sub_map = VM_MAP_NULL;
        }
    }
#else
    /* create a VM sub map and its pmap */
    sub_map = vm_map_create(pmap_create_options(NULL, 0, is_64bit), 0, size, TRUE);
#endif
    if (sub_map == VM_MAP_NULL) {
        ipc_port_release_send(mem_entry_port);
        kfree(shared_region, sizeof(*shared_region));
        shared_region = NULL;
        SHARED_REGION_TRACE_ERROR(("shared_region: create: couldn't allocate map\n"));
        goto done;
    }

    /* shared regions should always enforce code-signing */
    vm_map_cs_enforcement_set(sub_map, true);
    assert(vm_map_cs_enforcement(sub_map));
    assert(pmap_get_vm_map_cs_enforced(vm_map_pmap(sub_map)));

    assert(!sub_map->disable_vmentry_reuse);
    sub_map->is_nested_map = TRUE;

    /* make the memory entry point to the VM sub map */
    mem_entry->is_sub_map = TRUE;
    mem_entry->backing.map = sub_map;
    mem_entry->size = size;
    mem_entry->protection = VM_PROT_ALL;

    /* make the shared region point at the memory entry */
    shared_region->sr_mem_entry = mem_entry_port;

    /* fill in the shared region's environment and settings */
    shared_region->sr_base_address = base_address;
    shared_region->sr_size = size;
    shared_region->sr_pmap_nesting_start = pmap_nesting_start;
    shared_region->sr_pmap_nesting_size = pmap_nesting_size;
    shared_region->sr_cpu_type = cputype;
    shared_region->sr_cpu_subtype = cpu_subtype;
    shared_region->sr_64bit = (uint8_t)is_64bit;
    shared_region->sr_root_dir = root_dir;

    queue_init(&shared_region->sr_q);
    shared_region->sr_mapping_in_progress = FALSE;
    shared_region->sr_slide_in_progress = FALSE;
    shared_region->sr_persists = FALSE;
    shared_region->sr_stale = FALSE;
    shared_region->sr_timer_call = NULL;
    shared_region->sr_first_mapping = (mach_vm_offset_t) -1;

    /* grab a reference for the caller */
    shared_region->sr_ref_count = 1;

    shared_region->sr_slide = 0; /* not slid yet */

    /* Initialize UUID and other metadata */
    memset(&shared_region->sr_uuid, '\0', sizeof(shared_region->sr_uuid));
    shared_region->sr_uuid_copied = FALSE;
    shared_region->sr_images_count = 0;
    shared_region->sr_images = NULL;
#if __has_feature(ptrauth_calls)
    shared_region->sr_reslide = reslide;
    shared_region->sr_num_auth_section = 0;
    for (uint_t i = 0; i < NUM_SR_AUTH_SECTIONS; ++i) {
        shared_region->sr_auth_section[i] = NULL;
    }
    shared_region->sr_num_auth_section = 0;
#endif /* __has_feature(ptrauth_calls) */

done:
    if (shared_region) {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d,"
            "base=0x%llx,size=0x%llx) <- "
            "%p mem=(%p,%p) map=%p pmap=%p\n",
            (void *)VM_KERNEL_ADDRPERM(root_dir),
            cputype, cpu_subtype, is_64bit, reslide,
            (long long)base_address,
            (long long)size,
            (void *)VM_KERNEL_ADDRPERM(shared_region),
            (void *)VM_KERNEL_ADDRPERM(mem_entry_port),
            (void *)VM_KERNEL_ADDRPERM(mem_entry),
            (void *)VM_KERNEL_ADDRPERM(sub_map),
            (void *)VM_KERNEL_ADDRPERM(sub_map->pmap)));
    } else {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,"
            "base=0x%llx,size=0x%llx) <- NULL",
            (void *)VM_KERNEL_ADDRPERM(root_dir),
            cputype, cpu_subtype, is_64bit,
            (long long)base_address,
            (long long)size));
    }
    return shared_region;
}
/*
 * Destroy a now-unused shared region.
 * The shared region is no longer in the queue and cannot be looked up.
 */
static void
vm_shared_region_destroy(
    vm_shared_region_t shared_region)
{
    vm_named_entry_t mem_entry;
    vm_map_t map;

    SHARED_REGION_TRACE_INFO(
        ("shared_region: -> destroy(%p) (root=%p,cpu=<%d,%d>,64bit=%d)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir),
        shared_region->sr_cpu_type,
        shared_region->sr_cpu_subtype,
        shared_region->sr_64bit));

    assert(shared_region->sr_ref_count == 0);
    assert(!shared_region->sr_persists);

    mem_entry = (vm_named_entry_t) ip_get_kobject(shared_region->sr_mem_entry);
    assert(mem_entry->is_sub_map);
    assert(!mem_entry->internal);
    assert(!mem_entry->is_copy);
    map = mem_entry->backing.map;

    /*
     * Clean up the pmap first. The virtual addresses that were
     * entered in this possibly "nested" pmap may have different values
     * than the VM map's min and max offsets, if the VM sub map was
     * mapped at a non-zero offset in the processes' main VM maps, which
     * is usually the case, so the clean-up we do in vm_map_destroy() would
     * not completely clean up the pmap.
     */
    if (map->pmap) {
        pmap_remove(map->pmap,
            (vm_map_offset_t)shared_region->sr_base_address,
            (vm_map_offset_t)(shared_region->sr_base_address + shared_region->sr_size));
    }

    /*
     * Release our (one and only) handle on the memory entry.
     * This will generate a no-senders notification, which will be processed
     * by ipc_kobject_notify(), which will release the one and only
     * reference on the memory entry and cause it to be destroyed, along
     * with the VM sub map and its pmap.
     */
    mach_memory_entry_port_release(shared_region->sr_mem_entry);
    mem_entry = NULL;
    shared_region->sr_mem_entry = IPC_PORT_NULL;

    if (shared_region->sr_timer_call) {
        thread_call_free(shared_region->sr_timer_call);
    }

#if __has_feature(ptrauth_calls)
    /*
     * Free the cached copies of slide_info for the AUTH regions.
     */
    for (uint_t i = 0; i < shared_region->sr_num_auth_section; ++i) {
        vm_shared_region_slide_info_t si = shared_region->sr_auth_section[i];
        if (si != NULL) {
            vm_object_deallocate(si->si_slide_object);
            kheap_free(KHEAP_DATA_BUFFERS, si->si_slide_info_entry, si->si_slide_info_size);
            kfree(si, sizeof *si);
            shared_region->sr_auth_section[i] = NULL;
        }
    }
    shared_region->sr_num_auth_section = 0;
#endif /* __has_feature(ptrauth_calls) */

    /* release the shared region structure... */
    kfree(shared_region, sizeof(*shared_region));

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: destroy(%p) <-\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));
    shared_region = NULL;
}
/*
 * Gets the address of the first (in time) mapping in the shared region.
 * If used during initial task setup by dyld, task should be non-NULL.
 */
kern_return_t
vm_shared_region_start_address(
    vm_shared_region_t shared_region,
    mach_vm_offset_t *start_address,
    task_t task)
{
    kern_return_t kr;
    mach_vm_offset_t sr_base_address;
    mach_vm_offset_t sr_first_mapping;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> start_address(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));

    vm_shared_region_lock();

    /*
     * Wait if there's another thread establishing a mapping
     * in this shared region right when we're looking at it.
     * We want a consistent view of the map...
     */
    while (shared_region->sr_mapping_in_progress) {
        /* wait for our turn... */
        vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
            THREAD_UNINT);
    }
    assert(!shared_region->sr_mapping_in_progress);
    assert(shared_region->sr_ref_count > 0);

    sr_base_address = shared_region->sr_base_address;
    sr_first_mapping = shared_region->sr_first_mapping;

    if (sr_first_mapping == (mach_vm_offset_t) -1) {
        /* shared region is empty */
        kr = KERN_INVALID_ADDRESS;
    } else {
        kr = KERN_SUCCESS;
        *start_address = sr_base_address + sr_first_mapping;
    }

    uint32_t slide = shared_region->sr_slide;

    vm_shared_region_unlock();

    /*
     * Cache shared region info in the task for telemetry gathering, if we're
     * passed in the task. No task lock here as we're still in initial task set up.
     */
    if (kr == KERN_SUCCESS && task != NULL && task->task_shared_region_slide == -1) {
        uint_t sc_header_uuid_offset = offsetof(struct _dyld_cache_header, uuid);
        if (copyin((user_addr_t)(*start_address + sc_header_uuid_offset),
            (char *)&task->task_shared_region_uuid,
            sizeof(task->task_shared_region_uuid)) == 0) {
            task->task_shared_region_slide = slide;
        }
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: start_address(%p) <- 0x%llx\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region),
        (long long)shared_region->sr_base_address));

    return kr;
}
/*
 * Look up a pre-existing mapping in shared region, for replacement.
 * Takes an extra object reference if found.
 */
static kern_return_t
find_mapping_to_slide(vm_map_t map, vm_map_address_t addr, vm_map_entry_t entry)
{
    vm_map_entry_t found;

    /* find the shared region's map entry to slide */
    vm_map_lock_read(map);
    if (!vm_map_lookup_entry(map, addr, &found)) {
        /* no mapping there */
        vm_map_unlock_read(map);
        return KERN_INVALID_ARGUMENT;
    }

    *entry = *found;
    /* extra ref to keep object alive while map is unlocked */
    vm_object_reference(VME_OBJECT(found));
    vm_map_unlock_read(map);
    return KERN_SUCCESS;
}
#if __has_feature(ptrauth_calls)

/*
 * Determine if this task is actually using pointer signing.
 */
static boolean_t
task_sign_pointers(task_t task)
{
    if (task->map &&
        task->map->pmap &&
        !task->map->pmap->disable_jop) {
        return TRUE;
    }
    return FALSE;
}

/*
 * If the shared region contains mappings that are authenticated, then
 * remap them into the task private map.
 *
 * Failures are possible in this routine when jetsam kills a process
 * just as dyld is trying to set it up. The vm_map and task shared region
 * info get torn down w/o waiting for this thread to finish up.
 */
__attribute__((noinline))
kern_return_t
vm_shared_region_auth_remap(vm_shared_region_t sr)
{
    memory_object_t sr_pager = MEMORY_OBJECT_NULL;
    task_t task = current_task();
    vm_shared_region_slide_info_t si;
    uint_t i;
    vm_object_t object;
    vm_map_t sr_map;
    struct vm_map_entry tmp_entry_store = {0};
    vm_map_entry_t tmp_entry = NULL;
    int vm_flags;
    vm_map_kernel_flags_t vmk_flags;
    vm_map_offset_t map_addr;
    kern_return_t kr = KERN_SUCCESS;
    boolean_t use_ptr_auth = task_sign_pointers(task);

    /*
     * Don't do this more than once and avoid any race conditions in finishing it.
     */
    vm_shared_region_lock();
    while (sr->sr_mapping_in_progress) {
        /* wait for our turn... */
        vm_shared_region_sleep(&sr->sr_mapping_in_progress, THREAD_UNINT);
    }
    assert(!sr->sr_mapping_in_progress);
    assert(sr->sr_ref_count > 0);

    /* Just return if already done. */
    if (task->shared_region_auth_remapped) {
        vm_shared_region_unlock();
        return KERN_SUCCESS;
    }

    /* let others know to wait while we're working in this shared region */
    sr->sr_mapping_in_progress = TRUE;
    vm_shared_region_unlock();

    /*
     * Remap any sections with pointer authentications into the private map.
     */
    for (i = 0; i < sr->sr_num_auth_section; ++i) {
        si = sr->sr_auth_section[i];
        assert(si != NULL);
        assert(si->si_ptrauth);

        /*
         * We have mapping that needs to be private.
         * Look for an existing slid mapping's pager with matching
         * object, offset, slide info and shared_region_id to reuse.
         */
        object = si->si_slide_object;
        sr_pager = shared_region_pager_match(object, si->si_start, si,
            use_ptr_auth ? task->jop_pid : 0);
        if (sr_pager == MEMORY_OBJECT_NULL) {
            kr = KERN_FAILURE;
            goto done;
        }

        /*
         * verify matching jop_pid for this task and this pager
         */
        if (use_ptr_auth) {
            shared_region_pager_match_task_key(sr_pager, task);
        }

        sr_map = vm_shared_region_vm_map(sr);
        tmp_entry = NULL;

        kr = find_mapping_to_slide(sr_map, si->si_slid_address - sr->sr_base_address, &tmp_entry_store);
        if (kr != KERN_SUCCESS) {
            goto done;
        }
        tmp_entry = &tmp_entry_store;

        /*
         * Check that the object exactly covers the region to slide.
         */
        if (tmp_entry->vme_end - tmp_entry->vme_start != si->si_end - si->si_start) {
            kr = KERN_FAILURE;
            goto done;
        }

        /*
         * map the pager over the portion of the mapping that needs sliding
         */
        vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
        vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
        vmk_flags.vmkf_overwrite_immutable = TRUE;
        map_addr = si->si_slid_address;
        kr = vm_map_enter_mem_object(task->map,
            &map_addr,
            si->si_end - si->si_start,
            (mach_vm_offset_t) 0,
            vm_flags,
            vmk_flags,
            VM_KERN_MEMORY_NONE,
            (ipc_port_t)(uintptr_t) sr_pager,
            0,
            TRUE,
            tmp_entry->protection,
            tmp_entry->max_protection,
            tmp_entry->inheritance);
        memory_object_deallocate(sr_pager);
        sr_pager = MEMORY_OBJECT_NULL;
        if (kr != KERN_SUCCESS) {
            goto done;
        }
        assertf(map_addr == si->si_slid_address,
            "map_addr=0x%llx si_slid_address=0x%llx tmp_entry=%p\n",
            (uint64_t)map_addr,
            (uint64_t)si->si_slid_address,
            tmp_entry);

        /* Drop the ref count grabbed by find_mapping_to_slide */
        vm_object_deallocate(VME_OBJECT(tmp_entry));
        tmp_entry = NULL;
    }

done:
    if (tmp_entry != NULL) {
        /* Drop the ref count grabbed by find_mapping_to_slide */
        vm_object_deallocate(VME_OBJECT(tmp_entry));
        tmp_entry = NULL;
    }

    /*
     * Drop any extra reference to the pager in case we're quitting due to an error above.
     */
    if (sr_pager != MEMORY_OBJECT_NULL) {
        memory_object_deallocate(sr_pager);
    }

    /*
     * Mark the region as having its auth sections remapped.
     */
    vm_shared_region_lock();
    task->shared_region_auth_remapped = TRUE;
    sr->sr_mapping_in_progress = FALSE;
    thread_wakeup((event_t)&sr->sr_mapping_in_progress);
    vm_shared_region_unlock();
    return kr;
}
#endif /* __has_feature(ptrauth_calls) */
void
vm_shared_region_undo_mappings(
    vm_map_t sr_map,
    mach_vm_offset_t sr_base_address,
    struct _sr_file_mappings *srf_mappings,
    struct _sr_file_mappings *srf_mappings_current,
    unsigned int srf_current_mappings_count)
{
    unsigned int j = 0;
    vm_shared_region_t shared_region = NULL;
    boolean_t reset_shared_region_state = FALSE;
    struct _sr_file_mappings *srfmp;
    unsigned int mappings_count;
    struct shared_file_mapping_slide_np *mappings;

    shared_region = vm_shared_region_get(current_task());
    if (shared_region == NULL) {
        printf("Failed to undo mappings because of NULL shared region.\n");
        return;
    }

    if (sr_map == NULL) {
        ipc_port_t sr_handle;
        vm_named_entry_t sr_mem_entry;

        vm_shared_region_lock();
        assert(shared_region->sr_ref_count > 0);

        while (shared_region->sr_mapping_in_progress) {
            /* wait for our turn... */
            vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
                THREAD_UNINT);
        }
        assert(!shared_region->sr_mapping_in_progress);
        assert(shared_region->sr_ref_count > 0);
        /* let others know we're working in this shared region */
        shared_region->sr_mapping_in_progress = TRUE;

        vm_shared_region_unlock();

        reset_shared_region_state = TRUE;

        /* no need to lock because this data is never modified... */
        sr_handle = shared_region->sr_mem_entry;
        sr_mem_entry = (vm_named_entry_t) ip_get_kobject(sr_handle);
        sr_map = sr_mem_entry->backing.map;
        sr_base_address = shared_region->sr_base_address;
    }

    /*
     * Undo the mappings we've established so far.
     */
    for (srfmp = &srf_mappings[0];
        srfmp <= srf_mappings_current;
        srfmp++) {
        mappings = srfmp->mappings;
        mappings_count = srfmp->mappings_count;
        if (srfmp == srf_mappings_current) {
            mappings_count = srf_current_mappings_count;
        }

        for (j = 0; j < mappings_count; j++) {
            kern_return_t kr2;

            if (mappings[j].sms_size == 0) {
                /*
                 * We didn't establish this
                 * mapping, so nothing to undo.
                 */
                continue;
            }
            SHARED_REGION_TRACE_INFO(
                ("shared_region: mapping[%d]: "
                "address:0x%016llx "
                "size:0x%016llx "
                "offset:0x%016llx "
                "maxprot:0x%x prot:0x%x: "
                "undoing...\n",
                j,
                (long long)mappings[j].sms_address,
                (long long)mappings[j].sms_size,
                (long long)mappings[j].sms_file_offset,
                mappings[j].sms_max_prot,
                mappings[j].sms_init_prot));
            kr2 = mach_vm_deallocate(
                sr_map,
                (mappings[j].sms_address -
                sr_base_address),
                mappings[j].sms_size);
            assert(kr2 == KERN_SUCCESS);
        }
    }

    if (reset_shared_region_state) {
        vm_shared_region_lock();
        assert(shared_region->sr_ref_count > 0);
        assert(shared_region->sr_mapping_in_progress);
        /* we're done working on that shared region */
        shared_region->sr_mapping_in_progress = FALSE;
        thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
        vm_shared_region_unlock();
        reset_shared_region_state = FALSE;
    }

    vm_shared_region_deallocate(shared_region);
}
/*
 * For now we only expect to see at most 4 regions to relocate/authenticate
 * per file: one RW VM_PROT_SLIDE and one RW VM_PROT_SLIDE | VM_PROT_NOAUTH,
 * and then one RO VM_PROT_SLIDE and one RO VM_PROT_SLIDE | VM_PROT_NOAUTH.
 */
#define VMSR_NUM_SLIDES 4
/*
 * First part of vm_shared_region_map_file(). Split out to
 * avoid kernel stack overflow.
 */
__attribute__((noinline))
static kern_return_t
vm_shared_region_map_file_setup(
    vm_shared_region_t shared_region,
    int sr_file_mappings_count,
    struct _sr_file_mappings *sr_file_mappings,
    unsigned int *mappings_to_slide_cnt,
    struct shared_file_mapping_slide_np **mappings_to_slide,
    mach_vm_offset_t *slid_mappings,
    memory_object_control_t *slid_file_controls,
    mach_vm_offset_t *first_mapping,
    mach_vm_offset_t *file_first_mappings,
    mach_vm_offset_t *sfm_min_address,
    mach_vm_offset_t *sfm_max_address,
    vm_map_t *sr_map_ptr,
    vm_map_offset_t *lowest_unnestable_addr_ptr)
{
    kern_return_t kr = KERN_SUCCESS;
    memory_object_control_t file_control;
    vm_object_t file_object;
    ipc_port_t sr_handle;
    vm_named_entry_t sr_mem_entry;
    vm_map_t sr_map;
    mach_vm_offset_t sr_base_address;
    unsigned int i = 0;
    mach_port_t map_port;
    vm_map_offset_t target_address;
    vm_object_t object;
    vm_object_size_t obj_size;
    vm_map_offset_t lowest_unnestable_addr = 0;
    vm_map_kernel_flags_t vmk_flags;
    mach_vm_offset_t sfm_end;
    uint32_t mappings_count;
    struct shared_file_mapping_slide_np *mappings;
    struct _sr_file_mappings *srfmp;
    unsigned int current_file_index = 0;

    vm_shared_region_lock();
    assert(shared_region->sr_ref_count > 0);

    /*
     * Make sure we handle only one mapping at a time in a given
     * shared region, to avoid race conditions. This should not
     * happen frequently...
     */
    while (shared_region->sr_mapping_in_progress) {
        /* wait for our turn... */
        vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
            THREAD_UNINT);
    }
    assert(!shared_region->sr_mapping_in_progress);
    assert(shared_region->sr_ref_count > 0);
    /* let others know we're working in this shared region */
    shared_region->sr_mapping_in_progress = TRUE;

    vm_shared_region_unlock();

    /* no need to lock because this data is never modified... */
    sr_handle = shared_region->sr_mem_entry;
    sr_mem_entry = (vm_named_entry_t) ip_get_kobject(sr_handle);
    sr_map = sr_mem_entry->backing.map;
    sr_base_address = shared_region->sr_base_address;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> map(%p)\n",
        (void *)VM_KERNEL_ADDRPERM(shared_region)));

    mappings_count = 0;
    mappings = NULL;

    /* process all the files to be mapped */
    for (srfmp = &sr_file_mappings[0];
        srfmp < &sr_file_mappings[sr_file_mappings_count];
        srfmp++) {
        mappings_count = srfmp->mappings_count;
        mappings = srfmp->mappings;
        file_control = srfmp->file_control;

        if (mappings_count == 0) {
            /* no mappings here... */
            continue;
        }

        /*
         * The code below can only correctly "slide" (perform relocations) for one
         * value of the slide amount. So if a file has a non-zero slide, it has to
         * match any previous value. A zero slide value is ok for things that are
         * just directly mapped.
         */
        if (shared_region->sr_slide == 0 && srfmp->slide != 0) {
            shared_region->sr_slide = srfmp->slide;
        } else if (shared_region->sr_slide != 0 &&
            srfmp->slide != 0 &&
            shared_region->sr_slide != srfmp->slide) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: more than 1 non-zero slide value amount "
                "slide 1:0x%x slide 2:0x%x\n ",
                shared_region->sr_slide, srfmp->slide));
            kr = KERN_INVALID_ARGUMENT;
            break;
        }

#if __arm64__
        if ((shared_region->sr_64bit ||
            page_shift_user32 == SIXTEENK_PAGE_SHIFT) &&
            ((srfmp->slide & SIXTEENK_PAGE_MASK) != 0)) {
            printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n",
                __FUNCTION__, srfmp->slide);
            kr = KERN_INVALID_ARGUMENT;
            break;
        }
#endif /* __arm64__ */

        /* get the VM object associated with the file to be mapped */
        file_object = memory_object_control_to_vm_object(file_control);
        assert(file_object);

        /* establish the mappings for that file */
        for (i = 0; i < mappings_count; i++) {
            SHARED_REGION_TRACE_INFO(
                ("shared_region: mapping[%d]: "
                "address:0x%016llx size:0x%016llx offset:0x%016llx "
                "maxprot:0x%x prot:0x%x\n",
                i,
                (long long)mappings[i].sms_address,
                (long long)mappings[i].sms_size,
                (long long)mappings[i].sms_file_offset,
                mappings[i].sms_max_prot,
                mappings[i].sms_init_prot));

            if (mappings[i].sms_address < *sfm_min_address) {
                *sfm_min_address = mappings[i].sms_address;
            }

            if (os_add_overflow(mappings[i].sms_address,
                mappings[i].sms_size,
                &sfm_end) ||
                (vm_map_round_page(sfm_end, VM_MAP_PAGE_MASK(sr_map)) <
                mappings[i].sms_address)) {
                /* overflow */
                kr = KERN_INVALID_ARGUMENT;
                break;
            }
            if (sfm_end > *sfm_max_address) {
                *sfm_max_address = sfm_end;
            }

            if (mappings[i].sms_init_prot & VM_PROT_ZF) {
                /* zero-filled memory */
                map_port = MACH_PORT_NULL;
            } else {
                /* file-backed memory */
                __IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager);
            }

            /*
             * Remember which mappings need sliding.
             */
            if (mappings[i].sms_max_prot & VM_PROT_SLIDE) {
                if (*mappings_to_slide_cnt == VMSR_NUM_SLIDES) {
                    SHARED_REGION_TRACE_INFO(
                        ("shared_region: mapping[%d]: "
                        "address:0x%016llx size:0x%016llx "
                        "offset:0x%016llx "
                        "maxprot:0x%x prot:0x%x "
                        "too many mappings to slide...\n",
                        i,
                        (long long)mappings[i].sms_address,
                        (long long)mappings[i].sms_size,
                        (long long)mappings[i].sms_file_offset,
                        mappings[i].sms_max_prot,
                        mappings[i].sms_init_prot));
                } else {
                    mappings_to_slide[*mappings_to_slide_cnt] = &mappings[i];
                    *mappings_to_slide_cnt += 1;
                }
            }

            /* mapping's address is relative to the shared region base */
            target_address = (vm_map_offset_t)(mappings[i].sms_address - sr_base_address);

            vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
            vmk_flags.vmkf_already = TRUE;
            /* no copy-on-read for mapped binaries */
            vmk_flags.vmkf_no_copy_on_read = 1;

            /* establish that mapping, OK if it's "already" there */
            if (map_port == MACH_PORT_NULL) {
                /*
                 * We want to map some anonymous memory in a shared region.
                 * We have to create the VM object now, so that it can be mapped "copy-on-write".
                 */
                obj_size = vm_map_round_page(mappings[i].sms_size, VM_MAP_PAGE_MASK(sr_map));
                object = vm_object_allocate(obj_size);
                if (object == VM_OBJECT_NULL) {
                    kr = KERN_RESOURCE_SHORTAGE;
                } else {
                    kr = vm_map_enter(
                        sr_map,
                        &target_address,
                        vm_map_round_page(mappings[i].sms_size,
                        VM_MAP_PAGE_MASK(sr_map)),
                        0,
                        VM_FLAGS_FIXED,
                        vmk_flags,
                        VM_KERN_MEMORY_NONE,
                        object,
                        0,
                        TRUE,
                        mappings[i].sms_init_prot & VM_PROT_ALL,
                        mappings[i].sms_max_prot & VM_PROT_ALL,
                        VM_INHERIT_DEFAULT);
                }
            } else {
                object = VM_OBJECT_NULL; /* no anonymous memory here */
                kr = vm_map_enter_mem_object(
                    sr_map,
                    &target_address,
                    vm_map_round_page(mappings[i].sms_size,
                    VM_MAP_PAGE_MASK(sr_map)),
                    0,
                    VM_FLAGS_FIXED,
                    vmk_flags,
                    VM_KERN_MEMORY_NONE,
                    map_port,
                    mappings[i].sms_file_offset,
                    TRUE,
                    mappings[i].sms_init_prot & VM_PROT_ALL,
                    mappings[i].sms_max_prot & VM_PROT_ALL,
                    VM_INHERIT_DEFAULT);
            }

            if (kr == KERN_SUCCESS) {
                /*
                 * Record the first (chronologically) successful
                 * mapping in this shared region.
                 * We're protected by "sr_mapping_in_progress" here,
                 * so no need to lock "shared_region".
                 */
                assert(current_file_index < VMSR_NUM_SLIDES);
                if (file_first_mappings[current_file_index] == (mach_vm_offset_t) -1) {
                    file_first_mappings[current_file_index] = target_address;
                }

                if (*mappings_to_slide_cnt > 0 &&
                    mappings_to_slide[*mappings_to_slide_cnt - 1] == &mappings[i]) {
                    slid_mappings[*mappings_to_slide_cnt - 1] = target_address;
                    slid_file_controls[*mappings_to_slide_cnt - 1] = file_control;
                }

                /*
                 * Record the lowest writable address in this
                 * sub map, to log any unexpected unnesting below
                 * that address (see log_unnest_badness()).
                 */
                if ((mappings[i].sms_init_prot & VM_PROT_WRITE) &&
                    sr_map->is_nested_map &&
                    (lowest_unnestable_addr == 0 ||
                    (target_address < lowest_unnestable_addr))) {
                    lowest_unnestable_addr = target_address;
                }
            } else {
                if (map_port == MACH_PORT_NULL) {
                    /*
                     * Get rid of the VM object we just created
                     * but failed to map.
                     */
                    vm_object_deallocate(object);
                    object = VM_OBJECT_NULL;
                }
                if (kr == KERN_MEMORY_PRESENT) {
                    /*
                     * This exact mapping was already there:
                     * that's fine.
                     */
                    SHARED_REGION_TRACE_INFO(
                        ("shared_region: mapping[%d]: "
                        "address:0x%016llx size:0x%016llx "
                        "offset:0x%016llx "
                        "maxprot:0x%x prot:0x%x "
                        "already mapped...\n",
                        i,
                        (long long)mappings[i].sms_address,
                        (long long)mappings[i].sms_size,
                        (long long)mappings[i].sms_file_offset,
                        mappings[i].sms_max_prot,
                        mappings[i].sms_init_prot));
                    /*
                     * We didn't establish this mapping ourselves;
                     * let's reset its size, so that we do not
                     * attempt to undo it if an error occurs later.
                     */
                    mappings[i].sms_size = 0;
                    kr = KERN_SUCCESS;
                } else {
                    break;
                }
            }
        }

        if (kr != KERN_SUCCESS) {
            break;
        }

        ++current_file_index;
    }

    if (file_first_mappings[0] != (mach_vm_offset_t)-1) {
        *first_mapping = file_first_mappings[0];
    }

    if (kr != KERN_SUCCESS) {
        /* the last mapping we tried (mappings[i]) failed ! */
        assert(i < mappings_count);
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: mapping[%d]: "
            "address:0x%016llx size:0x%016llx "
            "offset:0x%016llx "
            "maxprot:0x%x prot:0x%x failed 0x%x\n",
            i,
            (long long)mappings[i].sms_address,
            (long long)mappings[i].sms_size,
            (long long)mappings[i].sms_file_offset,
            mappings[i].sms_max_prot,
            mappings[i].sms_init_prot,
            kr));

        /*
         * Respect the design of vm_shared_region_undo_mappings:
         * we are holding sr_mapping_in_progress == true here, so
         * sr_map must not be NULL, otherwise vm_shared_region_undo_mappings
         * would block waiting for sr_mapping_in_progress to become false.
         */
        assert(sr_map != NULL);
        /* undo all the previous mappings */
        vm_shared_region_undo_mappings(sr_map, sr_base_address, sr_file_mappings, srfmp, i);
        return kr;
    }

    *lowest_unnestable_addr_ptr = lowest_unnestable_addr;
    *sr_map_ptr = sr_map;
    return KERN_SUCCESS;
}
/* forward declaration */
__attribute__((noinline))
static void
vm_shared_region_map_file_final(
    vm_shared_region_t shared_region,
    vm_map_t sr_map,
    mach_vm_offset_t sfm_min_address,
    mach_vm_offset_t sfm_max_address,
    mach_vm_offset_t *file_first_mappings);
/*
 * Establish some mappings of a file in the shared region.
 * This is used by "dyld" via the shared_region_map_np() system call
 * to populate the shared region with the appropriate shared cache.
 *
 * One could also call it several times to incrementally load several
 * libraries, as long as they do not overlap.
 * It will return KERN_SUCCESS if the mappings were successfully established
 * or if they were already established identically by another process.
 */
__attribute__((noinline))
kern_return_t
vm_shared_region_map_file(
    vm_shared_region_t shared_region,
    int sr_file_mappings_count,
    struct _sr_file_mappings *sr_file_mappings)
{
    kern_return_t kr = KERN_SUCCESS;
    unsigned int i;
    unsigned int mappings_to_slide_cnt = 0;
    struct shared_file_mapping_slide_np *mappings_to_slide[VMSR_NUM_SLIDES] = {};
    mach_vm_offset_t slid_mappings[VMSR_NUM_SLIDES];
    memory_object_control_t slid_file_controls[VMSR_NUM_SLIDES];
    mach_vm_offset_t first_mapping = (mach_vm_offset_t)-1;
    mach_vm_offset_t sfm_min_address = (mach_vm_offset_t)-1;
    mach_vm_offset_t sfm_max_address = 0;
    vm_map_t sr_map = NULL;
    vm_map_offset_t lowest_unnestable_addr = 0;
    mach_vm_offset_t file_first_mappings[VMSR_NUM_SLIDES];

    for (i = 0; i < VMSR_NUM_SLIDES; ++i) {
        file_first_mappings[i] = (mach_vm_offset_t) -1;
    }

    kr = vm_shared_region_map_file_setup(shared_region, sr_file_mappings_count, sr_file_mappings,
        &mappings_to_slide_cnt, &mappings_to_slide[0], slid_mappings, slid_file_controls,
        &first_mapping, &file_first_mappings[0],
        &sfm_min_address, &sfm_max_address, &sr_map, &lowest_unnestable_addr);
    if (kr != KERN_SUCCESS) {
        vm_shared_region_lock();
        goto done;
    }

    /*
     * The call above installed direct mappings to the shared cache file.
     * Now we go back and overwrite the mappings that need relocation
     * with a special shared region pager.
     */
    for (i = 0; i < mappings_to_slide_cnt; ++i) {
        kr = vm_shared_region_slide(shared_region->sr_slide,
            mappings_to_slide[i]->sms_file_offset,
            mappings_to_slide[i]->sms_size,
            mappings_to_slide[i]->sms_slide_start,
            mappings_to_slide[i]->sms_slide_size,
            slid_mappings[i],
            slid_file_controls[i],
            mappings_to_slide[i]->sms_max_prot);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: region_slide("
                "slide:0x%x start:0x%016llx "
                "size:0x%016llx) failed 0x%x\n",
                shared_region->sr_slide,
                (long long)mappings_to_slide[i]->sms_slide_start,
                (long long)mappings_to_slide[i]->sms_slide_size,
                kr));
            vm_shared_region_lock();
            goto done;
        }
    }

    assert(kr == KERN_SUCCESS);

    /* adjust the map's "lowest_unnestable_start" */
    lowest_unnestable_addr &= ~(pmap_shared_region_size_min(sr_map->pmap) - 1);
    if (lowest_unnestable_addr != sr_map->lowest_unnestable_start) {
        vm_map_lock(sr_map);
        sr_map->lowest_unnestable_start = lowest_unnestable_addr;
        vm_map_unlock(sr_map);
    }

    vm_shared_region_lock();
    assert(shared_region->sr_ref_count > 0);
    assert(shared_region->sr_mapping_in_progress);

    /* set "sr_first_mapping"; dyld uses it to validate the shared cache */
    if (shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
        shared_region->sr_first_mapping = first_mapping;
    }

    vm_shared_region_map_file_final(shared_region, sr_map, sfm_min_address, sfm_max_address,
        &file_first_mappings[0]);

done:
    /*
     * We're done working on that shared region.
     * Wake up any waiting threads.
     */
    shared_region->sr_mapping_in_progress = FALSE;
    thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
    vm_shared_region_unlock();

#if __has_feature(ptrauth_calls)
    if (kr == KERN_SUCCESS) {
        /*
         * Since authenticated mappings were just added to the shared region,
         * go back and remap them into private mappings for this task.
         */
        kr = vm_shared_region_auth_remap(shared_region);
    }
#endif /* __has_feature(ptrauth_calls) */

    /* Cache shared region info needed for telemetry in the task */
    task_t task;
    if (kr == KERN_SUCCESS && (task = current_task())->task_shared_region_slide == -1) {
        mach_vm_offset_t start_address;
        (void)vm_shared_region_start_address(shared_region, &start_address, task);
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: map(%p) <- 0x%x \n",
        (void *)VM_KERNEL_ADDRPERM(shared_region), kr));
    return kr;
}
1784 * Final part of vm_shared_region_map_file().
1785 * Kept in separate function to avoid blowing out the stack.
1787 __attribute__((noinline
))
1789 vm_shared_region_map_file_final(
1790 vm_shared_region_t shared_region
,
1792 mach_vm_offset_t sfm_min_address
,
1793 mach_vm_offset_t sfm_max_address
,
1794 __unused mach_vm_offset_t
*file_first_mappings
)
1796 struct _dyld_cache_header sr_cache_header
;
1798 size_t image_array_length
;
1799 struct _dyld_cache_image_text_info
*sr_image_layout
;
1800 boolean_t locally_built
= FALSE
;
1804 * copy in the shared region UUID to the shared region structure.
1805 * we do this indirectly by first copying in the shared cache header
1806 * and then copying the UUID from there because we'll need to look
1807 * at other content from the shared cache header.
1809 if (!shared_region
->sr_uuid_copied
) {
1810 error
= copyin((user_addr_t
)(shared_region
->sr_base_address
+ shared_region
->sr_first_mapping
),
1811 (char *)&sr_cache_header
,
1812 sizeof(sr_cache_header
));
1814 memcpy(&shared_region
->sr_uuid
, &sr_cache_header
.uuid
, sizeof(shared_region
->sr_uuid
));
1815 shared_region
->sr_uuid_copied
= TRUE
;
1816 locally_built
= sr_cache_header
.locallyBuiltCache
;
1818 #if DEVELOPMENT || DEBUG
1819 panic("shared_region: copyin shared_cache_header(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
1820 "offset:0 size:0x%016llx) failed with %d\n",
1821 (long long)shared_region
->sr_base_address
,
1822 (long long)shared_region
->sr_first_mapping
,
1823 (long long)sizeof(sr_cache_header
),
1825 #endif /* DEVELOPMENT || DEBUG */
1826 shared_region
->sr_uuid_copied
= FALSE
;
    /*
     * We save a pointer to the shared cache mapped by the "init task", i.e. launchd.  This is used by
     * the stackshot code to reduce output size in the common case that everything maps the same shared cache.
     * One gotcha is that "userspace reboots" can occur which can cause a new shared region to be the primary
     * region.  In that case, launchd re-exec's itself, so we may go through this path multiple times.  We
     * let the most recent one win.
     *
     * Check whether the shared cache is a custom built one and copy in the shared cache layout accordingly.
     */
    bool is_init_task = (task_pid(current_task()) == 1);
    if (shared_region->sr_uuid_copied && is_init_task) {
        /* Copy in the shared cache layout if we're running with a locally built shared cache */
        if (locally_built) {
            KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_START);
            image_array_length = (size_t)(sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info));
            sr_image_layout = kheap_alloc(KHEAP_DATA_BUFFERS, image_array_length, Z_WAITOK);
            error = copyin((user_addr_t)(shared_region->sr_base_address + shared_region->sr_first_mapping +
                sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length);
            if (error == 0) {
                if (sr_cache_header.imagesTextCount >= UINT32_MAX) {
                    panic("shared_region: sr_cache_header.imagesTextCount >= UINT32_MAX");
                }
                shared_region->sr_images = kalloc((vm_size_t)(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64)));
                for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) {
                    memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid,
                        sizeof(shared_region->sr_images[index].imageUUID));
                    shared_region->sr_images[index].imageLoadAddress = sr_image_layout[index].loadAddress;
                }
                shared_region->sr_images_count = (uint32_t) sr_cache_header.imagesTextCount;
            } else {
#if DEVELOPMENT || DEBUG
                panic("shared_region: copyin shared_cache_layout(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
                    "offset:0x%016llx size:0x%016llx) failed with %d\n",
                    (long long)shared_region->sr_base_address,
                    (long long)shared_region->sr_first_mapping,
                    (long long)sr_cache_header.imagesTextOffset,
                    (long long)image_array_length,
                    error);
#endif /* DEVELOPMENT || DEBUG */
            }
            KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count);
            kheap_free(KHEAP_DATA_BUFFERS, sr_image_layout, image_array_length);
            sr_image_layout = NULL;
        }
        primary_system_shared_region = shared_region;
    }
    /*
     * If we succeeded, we know the bounds of the shared region.
     * Trim our pmaps to only cover this range (if applicable to
     * this device).
     */
    if (VM_MAP_PAGE_SHIFT(current_map()) == VM_MAP_PAGE_SHIFT(sr_map)) {
        pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_max_address - sfm_min_address);
    }

    return kr;
}
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 *
 * This also tries to trim the pmap for the shared region.
 */
vm_shared_region_t
vm_shared_region_trim_and_get(task_t task)
{
    vm_shared_region_t shared_region;
    ipc_port_t sr_handle;
    vm_named_entry_t sr_mem_entry;
    vm_map_t sr_map;

    /* Get the shared region and the map. */
    shared_region = vm_shared_region_get(task);
    if (shared_region == NULL) {
        return NULL;
    }

    sr_handle = shared_region->sr_mem_entry;
    sr_mem_entry = (vm_named_entry_t) ip_get_kobject(sr_handle);
    sr_map = sr_mem_entry->backing.map;

    /* Trim the pmap if possible. */
    if (VM_MAP_PAGE_SHIFT(task->map) == VM_MAP_PAGE_SHIFT(sr_map)) {
        pmap_trim(task->map->pmap, sr_map->pmap, 0, 0);
    }

    return shared_region;
}
/*
 * Enter the appropriate shared region into "map" for "task".
 * This involves looking up the shared region (and possibly creating a new
 * one) for the desired environment, then mapping the VM sub map into the
 * task's VM "map", with the appropriate level of pmap-nesting.
 */
kern_return_t
vm_shared_region_enter(
    struct _vm_map *map,
    struct task *task,
    boolean_t is_64bit,
    void *fsroot,
    cpu_type_t cpu,
    cpu_subtype_t cpu_subtype,
    boolean_t reslide)
{
    kern_return_t kr;
    vm_shared_region_t shared_region;
    vm_map_offset_t sr_address, sr_offset, target_address;
    vm_map_size_t sr_size, mapping_size;
    vm_map_offset_t sr_pmap_nesting_start;
    vm_map_size_t sr_pmap_nesting_size;
    ipc_port_t sr_handle;
    vm_prot_t cur_prot, max_prot;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> "
        "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d)\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(fsroot),
        cpu, cpu_subtype, is_64bit));

    /* lookup (create if needed) the shared region for this environment */
    shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit, reslide);
    if (shared_region == NULL) {
        /* this should not happen ! */
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: -> "
            "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d): "
            "lookup failed !\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit, reslide));
        //panic("shared_region_enter: lookup failed\n");
        return KERN_FAILURE;
    }

    /* no need to lock since this data is never modified */
    sr_address = (vm_map_offset_t)shared_region->sr_base_address;
    sr_size = (vm_map_size_t)shared_region->sr_size;
    sr_handle = shared_region->sr_mem_entry;
    sr_pmap_nesting_start = (vm_map_offset_t)shared_region->sr_pmap_nesting_start;
    sr_pmap_nesting_size = (vm_map_size_t)shared_region->sr_pmap_nesting_size;

    cur_prot = VM_PROT_READ;
    if (VM_MAP_POLICY_WRITABLE_SHARED_REGION(map)) {
        /*
         * XXX BINARY COMPATIBILITY
         * java6 apparently needs to modify some code in the
         * dyld shared cache and needs to be allowed to add
         * write access to those mappings.
         */
        max_prot = VM_PROT_ALL;
    } else {
        max_prot = VM_PROT_READ;
    }

    /*
     * Start mapping the shared region's VM sub map into the task's VM map.
     */
    sr_offset = 0;

    if (sr_pmap_nesting_start > sr_address) {
        /* we need to map a range without pmap-nesting first */
        target_address = sr_address;
        mapping_size = sr_pmap_nesting_start - sr_address;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_NONE,
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
                "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (void *)VM_KERNEL_ADDRPERM(task),
                (void *)VM_KERNEL_ADDRPERM(fsroot),
                cpu, cpu_subtype, is_64bit,
                (long long)target_address,
                (long long)mapping_size,
                (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
            "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit,
            (long long)target_address, (long long)mapping_size,
            (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
        sr_offset += mapping_size;
        sr_size -= mapping_size;
    }

    /*
     * We may need to map several pmap-nested portions, due to platform
     * specific restrictions on pmap nesting.
     * The pmap-nesting is triggered by the "vmkf_nested_pmap" flag...
     */
    for (;
        sr_pmap_nesting_size > 0;
        sr_offset += mapping_size,
        sr_size -= mapping_size,
        sr_pmap_nesting_size -= mapping_size) {
        vm_map_kernel_flags_t vmk_flags;

        target_address = sr_address + sr_offset;
        mapping_size = sr_pmap_nesting_size;
        if (mapping_size > pmap_nesting_size_max(map->pmap)) {
            mapping_size = (vm_map_offset_t) pmap_nesting_size_max(map->pmap);
        }
        vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
        vmk_flags.vmkf_nested_pmap = TRUE;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            vmk_flags,
            VM_MEMORY_SHARED_PMAP,
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
                "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (void *)VM_KERNEL_ADDRPERM(task),
                (void *)VM_KERNEL_ADDRPERM(fsroot),
                cpu, cpu_subtype, is_64bit,
                (long long)target_address,
                (long long)mapping_size,
                (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
            "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit,
            (long long)target_address, (long long)mapping_size,
            (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
    }
    if (sr_size > 0) {
        /* and there's some left to be mapped without pmap-nesting */
        target_address = sr_address + sr_offset;
        mapping_size = sr_size;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_NONE,
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
                "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (void *)VM_KERNEL_ADDRPERM(task),
                (void *)VM_KERNEL_ADDRPERM(fsroot),
                cpu, cpu_subtype, is_64bit,
                (long long)target_address,
                (long long)mapping_size,
                (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
            "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit,
            (long long)target_address, (long long)mapping_size,
            (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
        sr_offset += mapping_size;
        sr_size -= mapping_size;
    }
    assert(sr_size == 0);

done:
    if (kr == KERN_SUCCESS) {
        /* let the task use that shared region */
        vm_shared_region_set(task, shared_region);
    } else {
        /* drop our reference since we're not using it */
        vm_shared_region_deallocate(shared_region);
        vm_shared_region_set(task, NULL);
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: enter(%p,%p,%p,%d,%d,%d) <- 0x%x\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(fsroot),
        cpu, cpu_subtype, is_64bit, kr));
    return kr;
}
#define SANE_SLIDE_INFO_SIZE            (2560*1024) /* Can be changed if needed */

struct vm_shared_region_slide_info slide_info;

kern_return_t
vm_shared_region_sliding_valid(uint32_t slide)
{
    kern_return_t kr = KERN_SUCCESS;
    vm_shared_region_t sr = vm_shared_region_get(current_task());

    /* No region yet? we're fine. */
    if (sr == NULL) {
        return kr;
    }

    if (sr->sr_slide != 0 && slide != 0) {
        if (slide == sr->sr_slide) {
            /*
             * Request for sliding when we've
             * already done it with exactly the
             * same slide value before.
             * This isn't wrong technically but
             * we don't want to slide again and
             * so we return this value.
             */
            kr = KERN_INVALID_ARGUMENT;
        } else {
            printf("Mismatched shared region slide\n");
            kr = KERN_FAILURE;
        }
    }
    vm_shared_region_deallocate(sr);
    return kr;
}
/*
 * Actually create (really overwrite) the mapping to part of the shared cache which
 * undergoes relocation. This routine reads in the relocation info from dyld and
 * verifies it. It then creates a (or finds a matching) shared region pager which
 * handles the actual modification of the page contents and installs the mapping
 * using that pager.
 */
static kern_return_t
vm_shared_region_slide_mapping(
    vm_shared_region_t sr,
    user_addr_t slide_info_addr,
    mach_vm_size_t slide_info_size,
    mach_vm_offset_t start,
    mach_vm_size_t size,
    mach_vm_offset_t slid_mapping,
    uint32_t slide,
    memory_object_control_t sr_file_control,
    vm_prot_t prot)
{
    kern_return_t kr;
    vm_object_t object = VM_OBJECT_NULL;
    vm_shared_region_slide_info_t si = NULL;
    vm_map_entry_t tmp_entry = VM_MAP_ENTRY_NULL;
    struct vm_map_entry tmp_entry_store;
    memory_object_t sr_pager = MEMORY_OBJECT_NULL;
    vm_map_t sr_map;
    int vm_flags;
    vm_map_kernel_flags_t vmk_flags;
    vm_map_offset_t map_addr;
    void *slide_info_entry = NULL;
    int error;

    assert(sr->sr_slide_in_progress);

    if (sr_file_control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Copy in and verify the relocation information.
     */
    if (slide_info_size < MIN_SLIDE_INFO_SIZE) {
        printf("Slide_info_size too small: %lx\n", (uintptr_t)slide_info_size);
        return KERN_FAILURE;
    }
    if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
        printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
        return KERN_FAILURE;
    }

    slide_info_entry = kheap_alloc(KHEAP_DATA_BUFFERS, (vm_size_t)slide_info_size, Z_WAITOK);
    if (slide_info_entry == NULL) {
        return KERN_RESOURCE_SHORTAGE;
    }
    error = copyin(slide_info_addr, slide_info_entry, (size_t)slide_info_size);
    if (error) {
        printf("copyin of slide_info failed\n");
        kr = KERN_INVALID_ADDRESS;
        goto done;
    }

    if ((kr = vm_shared_region_slide_sanity_check(slide_info_entry, slide_info_size)) != KERN_SUCCESS) {
        printf("Sanity Check failed for slide_info\n");
        goto done;
    }

    /*
     * Allocate and fill in a vm_shared_region_slide_info.
     * This will either be used by a new pager, or used to find
     * a pre-existing matching pager.
     */
    object = memory_object_control_to_vm_object(sr_file_control);
    if (object == VM_OBJECT_NULL || object->internal) {
        object = VM_OBJECT_NULL;
        kr = KERN_INVALID_ADDRESS;
        goto done;
    }

    si = kalloc(sizeof(*si));
    if (si == NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto done;
    }
    vm_object_lock(object);

    vm_object_reference_locked(object); /* for si->slide_object */
    object->object_is_shared_cache = TRUE;
    vm_object_unlock(object);

    si->si_slide_info_entry = slide_info_entry;
    si->si_slide_info_size = slide_info_size;

    assert(slid_mapping != (mach_vm_offset_t) -1);
    si->si_slid_address = slid_mapping + sr->sr_base_address;
    si->si_slide_object = object;
    si->si_start = start;
    si->si_end = si->si_start + size;
    si->si_slide = slide;
#if __has_feature(ptrauth_calls)
    /*
     * If there is authenticated pointer data in this slid mapping,
     * then just add the information needed to create new pagers for
     * different shared_region_id's later.
     */
    if (sr->sr_cpu_type == CPU_TYPE_ARM64 &&
        sr->sr_cpu_subtype == CPU_SUBTYPE_ARM64E &&
        !(prot & VM_PROT_NOAUTH)) {
        if (sr->sr_num_auth_section == NUM_SR_AUTH_SECTIONS) {
            printf("Too many auth/private sections for shared region!!\n");
            kr = KERN_INVALID_ARGUMENT;
            goto done;
        }
        si->si_ptrauth = TRUE;
        sr->sr_auth_section[sr->sr_num_auth_section++] = si;
        /*
         * Remember the shared region, since that's where we'll
         * stash this info for all auth pagers to share. Each pager
         * will need to take a reference to it.
         */
        si->si_shared_region = sr;
        kr = KERN_SUCCESS;
        goto done;
    }
    si->si_shared_region = NULL;
    si->si_ptrauth = FALSE;
#else /* __has_feature(ptrauth_calls) */
    (void)prot; /* silence unused warning */
#endif /* __has_feature(ptrauth_calls) */

    /*
     * find the pre-existing shared region's map entry to slide
     */
    sr_map = vm_shared_region_vm_map(sr);
    kr = find_mapping_to_slide(sr_map, (vm_map_address_t)slid_mapping, &tmp_entry_store);
    if (kr != KERN_SUCCESS) {
        goto done;
    }
    tmp_entry = &tmp_entry_store;

    /*
     * The object must exactly cover the region to slide.
     */
    assert(VME_OFFSET(tmp_entry) == start);
    assert(tmp_entry->vme_end - tmp_entry->vme_start == size);

    /* create a "shared_region" sliding pager */
    sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry), VME_OFFSET(tmp_entry), si, 0);
    if (sr_pager == MEMORY_OBJECT_NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto done;
    }

    /* map that pager over the portion of the mapping that needs sliding */
    vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    vmk_flags.vmkf_overwrite_immutable = TRUE;
    map_addr = tmp_entry->vme_start;
    kr = vm_map_enter_mem_object(sr_map,
        &map_addr,
        (tmp_entry->vme_end - tmp_entry->vme_start),
        (mach_vm_offset_t) 0,
        vm_flags,
        vmk_flags,
        VM_KERN_MEMORY_NONE,
        (ipc_port_t)(uintptr_t) sr_pager,
        0,
        TRUE,
        tmp_entry->protection,
        tmp_entry->max_protection,
        tmp_entry->inheritance);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x\n", kr);
    assertf(map_addr == tmp_entry->vme_start,
        "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n",
        (uint64_t)map_addr,
        (uint64_t) tmp_entry->vme_start,
        tmp_entry);

    /* success! */
    kr = KERN_SUCCESS;

done:
    if (sr_pager != NULL) {
        /*
         * Release the sr_pager reference obtained by shared_region_pager_setup().
         * The mapping, if it succeeded, is now holding a reference on the memory object.
         */
        memory_object_deallocate(sr_pager);
        sr_pager = MEMORY_OBJECT_NULL;
    }
    if (tmp_entry != NULL) {
        /* release extra ref on tmp_entry's VM object */
        vm_object_deallocate(VME_OBJECT(tmp_entry));
        tmp_entry = VM_MAP_ENTRY_NULL;
    }

    if (kr != KERN_SUCCESS) {
        /* cleanup */
        if (si != NULL) {
            if (si->si_slide_object) {
                vm_object_deallocate(si->si_slide_object);
                si->si_slide_object = VM_OBJECT_NULL;
            }
            kfree(si, sizeof(*si));
            si = NULL;
        }
        if (slide_info_entry != NULL) {
            kheap_free(KHEAP_DATA_BUFFERS, slide_info_entry, (vm_size_t)slide_info_size);
            slide_info_entry = NULL;
        }
    }
    return kr;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v2(
    vm_shared_region_slide_info_entry_v2_t s_info,
    mach_vm_size_t slide_info_size)
{
    if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v2)) {
        printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size);
        return KERN_FAILURE;
    }
    if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
        return KERN_FAILURE;
    }

    /* Ensure that the slide info doesn't reference any data outside of its bounds. */

    uint32_t page_starts_count = s_info->page_starts_count;
    uint32_t page_extras_count = s_info->page_extras_count;
    mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
    if (num_trailing_entries < page_starts_count) {
        return KERN_FAILURE;
    }

    /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
    mach_vm_size_t trailing_size = num_trailing_entries << 1;
    if (trailing_size >> 1 != num_trailing_entries) {
        return KERN_FAILURE;
    }

    mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
    if (required_size < sizeof(*s_info)) {
        return KERN_FAILURE;
    }

    if (required_size > slide_info_size) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v3(
    vm_shared_region_slide_info_entry_v3_t s_info,
    mach_vm_size_t slide_info_size)
{
    if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v3)) {
        printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size);
        return KERN_FAILURE;
    }
    if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
        printf("vm_shared_region_slide_sanity_check_v3: s_info->page_size != PAGE_SIZE_FOR_SR_SL 0x%llx != 0x%llx\n", (uint64_t)s_info->page_size, (uint64_t)PAGE_SIZE_FOR_SR_SLIDE);
        return KERN_FAILURE;
    }

    uint32_t page_starts_count = s_info->page_starts_count;
    mach_vm_size_t num_trailing_entries = page_starts_count;
    mach_vm_size_t trailing_size = num_trailing_entries << 1;
    mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
    if (required_size < sizeof(*s_info)) {
        printf("vm_shared_region_slide_sanity_check_v3: required_size != sizeof(*s_info) 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)sizeof(*s_info));
        return KERN_FAILURE;
    }

    if (required_size > slide_info_size) {
        printf("vm_shared_region_slide_sanity_check_v3: required_size != slide_info_size 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)slide_info_size);
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v4(
    vm_shared_region_slide_info_entry_v4_t s_info,
    mach_vm_size_t slide_info_size)
{
    if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v4)) {
        printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size);
        return KERN_FAILURE;
    }
    if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
        return KERN_FAILURE;
    }

    /* Ensure that the slide info doesn't reference any data outside of its bounds. */

    uint32_t page_starts_count = s_info->page_starts_count;
    uint32_t page_extras_count = s_info->page_extras_count;
    mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
    if (num_trailing_entries < page_starts_count) {
        return KERN_FAILURE;
    }

    /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
    mach_vm_size_t trailing_size = num_trailing_entries << 1;
    if (trailing_size >> 1 != num_trailing_entries) {
        return KERN_FAILURE;
    }

    mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
    if (required_size < sizeof(*s_info)) {
        return KERN_FAILURE;
    }

    if (required_size > slide_info_size) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check(
    vm_shared_region_slide_info_entry_t s_info,
    mach_vm_size_t s_info_size)
{
    kern_return_t kr;

    switch (s_info->version) {
    case 2:
        kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, s_info_size);
        break;
    case 3:
        kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, s_info_size);
        break;
    case 4:
        kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, s_info_size);
        break;
    default:
        kr = KERN_FAILURE;
    }
    return kr;
}
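
/*
 * Illustrative sketch only (not referenced by the kernel code above): the
 * overflow-safe size validation performed by the v2/v4 sanity checks,
 * distilled into a standalone helper.  The function and parameter names are
 * placeholders; the point is that the scaling by sizeof(uint16_t) and the
 * addition of the header size are each re-checked for wrap-around before
 * being compared against the caller-supplied buffer size.
 */
static inline bool
example_slide_info_fits(uint32_t page_starts_count,
    uint32_t page_extras_count,
    uint64_t header_size,
    uint64_t slide_info_size)
{
    uint64_t num_trailing_entries = (uint64_t)page_starts_count + page_extras_count;

    /* scale by sizeof(uint16_t); shifting keeps the overflow check trivial */
    uint64_t trailing_size = num_trailing_entries << 1;
    if ((trailing_size >> 1) != num_trailing_entries) {
        return false;           /* scaling overflowed */
    }

    uint64_t required_size = header_size + trailing_size;
    if (required_size < header_size) {
        return false;           /* addition overflowed */
    }
    return required_size <= slide_info_size;
}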
static kern_return_t
rebase_chain_32(
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

    const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
    const uint32_t value_mask = ~delta_mask;
    const uint32_t value_add = (uint32_t)(s_info->value_add);
    const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

    uint32_t page_offset = start_offset;
    uint32_t delta = 1;

    while (delta != 0 && page_offset <= last_page_offset) {
        uint8_t *loc;
        uint32_t value;

        loc = page_content + page_offset;
        memcpy(&value, loc, sizeof(value));
        delta = (value & delta_mask) >> delta_shift;
        value &= value_mask;

        if (value != 0) {
            value += value_add;
            value += slide_amount;
        }
        memcpy(loc, &value, sizeof(value));
        page_offset += delta;
    }

    /* If the offset went past the end of the page, then the slide data is invalid. */
    if (page_offset > last_page_offset) {
        return KERN_FAILURE;
    }
    return KERN_SUCCESS;
}
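
/*
 * Illustrative sketch only (not referenced by the kernel code above): why the
 * chain walkers compute delta_shift as __builtin_ctzll(delta_mask) - 2.  The
 * delta packed into each pointer counts 4-byte units, so shifting right by
 * (ctz(delta_mask) - 2) turns the masked bits directly into a byte offset.
 * The mask below is just an example value, not one taken from real slide info.
 */
static inline uint32_t
example_delta_in_bytes(uint32_t value)
{
    const uint32_t delta_mask = 0xC0000000u;                        /* example: top two bits */
    const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;   /* 30 - 2 = 28 */

    /* an encoded delta of 1 means "next pointer is 4 bytes away" */
    return (value & delta_mask) >> delta_shift;
}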
static kern_return_t
rebase_chain_64(
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint64_t);

    const uint64_t delta_mask = s_info->delta_mask;
    const uint64_t value_mask = ~delta_mask;
    const uint64_t value_add = s_info->value_add;
    const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;

    uint32_t page_offset = start_offset;
    uint32_t delta = 1;

    while (delta != 0 && page_offset <= last_page_offset) {
        uint8_t *loc;
        uint64_t value;

        loc = page_content + page_offset;
        memcpy(&value, loc, sizeof(value));
        delta = (uint32_t)((value & delta_mask) >> delta_shift);
        value &= value_mask;

        if (value != 0) {
            value += value_add;
            value += slide_amount;
        }
        memcpy(loc, &value, sizeof(value));
        page_offset += delta;
    }

    if (page_offset + sizeof(uint32_t) == PAGE_SIZE_FOR_SR_SLIDE) {
        /* If a pointer straddling the page boundary needs to be adjusted, then
         * add the slide to the lower half. The encoding guarantees that the upper
         * half on the next page will need no masking.
         *
         * This assumes a little-endian machine and that the region being slid
         * never crosses a 4 GB boundary. */

        uint8_t *loc = page_content + page_offset;
        uint32_t value;

        memcpy(&value, loc, sizeof(value));
        value += slide_amount;
        memcpy(loc, &value, sizeof(value));
    } else if (page_offset > last_page_offset) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
static kern_return_t
rebase_chain(
    boolean_t is_64,
    uint32_t pageIndex,
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    kern_return_t kr;

    if (is_64) {
        kr = rebase_chain_64(page_content, start_offset, slide_amount, s_info);
    } else {
        kr = rebase_chain_32(page_content, start_offset, slide_amount, s_info);
    }

    if (kr != KERN_SUCCESS) {
        printf("vm_shared_region_slide_page() offset overflow: pageIndex=%u, start_offset=%u, slide_amount=%u\n",
            pageIndex, start_offset, slide_amount);
    }
    return kr;
}
static kern_return_t
vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
    vm_shared_region_slide_info_entry_v2_t s_info = &si->si_slide_info_entry->v2;
    const uint32_t slide_amount = si->si_slide;

    /* The high bits of the delta_mask field are nonzero precisely when the shared
     * cache is 64-bit. */
    const boolean_t is_64 = (s_info->delta_mask >> 32) != 0;

    const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
    const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

    uint8_t *page_content = (uint8_t *)vaddr;
    uint16_t page_entry;

    if (pageIndex >= s_info->page_starts_count) {
        printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
            pageIndex, s_info->page_starts_count);
        return KERN_FAILURE;
    }
    page_entry = page_starts[pageIndex];

    if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
        return KERN_SUCCESS;
    }

    if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) {
        uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE;
        uint16_t info;

        do {
            uint16_t page_start_offset;
            kern_return_t kr;

            if (chain_index >= s_info->page_extras_count) {
                printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
                    chain_index, s_info->page_extras_count);
                return KERN_FAILURE;
            }
            info = page_extras[chain_index];
            page_start_offset = (uint16_t)((info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);

            kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
            if (kr != KERN_SUCCESS) {
                return KERN_FAILURE;
            }

            chain_index++;
        } while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END));
    } else {
        const uint16_t page_start_offset = (uint16_t)(page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);
        kern_return_t kr;

        kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
        if (kr != KERN_SUCCESS) {
            return KERN_FAILURE;
        }
    }

    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_page_v3(
    vm_shared_region_slide_info_t si,
    vm_offset_t vaddr,
    __unused mach_vm_offset_t uservaddr,
    uint32_t pageIndex,
#if !__has_feature(ptrauth_calls)
    __unused
#endif /* !__has_feature(ptrauth_calls) */
    uint64_t jop_key)
{
    vm_shared_region_slide_info_entry_v3_t s_info = &si->si_slide_info_entry->v3;
    const uint32_t slide_amount = si->si_slide;

    uint8_t *page_content = (uint8_t *)vaddr;
    uint16_t page_entry;

    if (pageIndex >= s_info->page_starts_count) {
        printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
            pageIndex, s_info->page_starts_count);
        return KERN_FAILURE;
    }
    page_entry = s_info->page_starts[pageIndex];

    if (page_entry == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE) {
        return KERN_SUCCESS;
    }

    uint8_t* rebaseLocation = page_content;
    uint64_t delta = page_entry;
    do {
        rebaseLocation += delta;
        uint64_t value;
        memcpy(&value, rebaseLocation, sizeof(value));
        delta = ((value & 0x3FF8000000000000) >> 51) * sizeof(uint64_t);

        // A pointer is one of :
        // {
        //     uint64_t pointerValue : 51;
        //     uint64_t offsetToNextPointer : 11;
        //     uint64_t isBind : 1 = 0;
        //     uint64_t authenticated : 1 = 0;
        // }
        // {
        //     uint32_t offsetFromSharedCacheBase;
        //     uint16_t diversityData;
        //     uint16_t hasAddressDiversity : 1;
        //     uint16_t hasDKey : 1;
        //     uint16_t hasBKey : 1;
        //     uint16_t offsetToNextPointer : 11;
        //     uint16_t isBind : 1;
        //     uint16_t authenticated : 1 = 1;
        // }
        // (An illustrative bit-field rendering of these two layouts follows this function.)

        bool isBind = (value & (1ULL << 62)) == 1;
        if (isBind) {
            return KERN_FAILURE;
        }

#if __has_feature(ptrauth_calls)
        uint16_t diversity_data = (uint16_t)(value >> 32);
        bool hasAddressDiversity = (value & (1ULL << 48)) != 0;
        ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
#endif /* __has_feature(ptrauth_calls) */
        bool isAuthenticated = (value & (1ULL << 63)) != 0;

        if (isAuthenticated) {
            // The new value for a rebase is the low 32-bits of the threaded value plus the slide.
            value = (value & 0xFFFFFFFF) + slide_amount;
            // Add in the offset from the mach_header
            const uint64_t value_add = s_info->value_add;
            value += value_add;

#if __has_feature(ptrauth_calls)
            uint64_t discriminator = diversity_data;
            if (hasAddressDiversity) {
                // First calculate a new discriminator using the address of where we are trying to store the value
                uintptr_t pageOffset = rebaseLocation - page_content;
                discriminator = __builtin_ptrauth_blend_discriminator((void*)(((uintptr_t)uservaddr) + pageOffset), discriminator);
            }

            if (jop_key != 0 && si->si_ptrauth && !arm_user_jop_disabled()) {
                /*
                 * these pointers are used in user mode. disable the kernel key diversification
                 * so we can sign them for use in user mode.
                 */
                value = (uintptr_t)pmap_sign_user_ptr((void *)value, key, discriminator, jop_key);
            }
#endif /* __has_feature(ptrauth_calls) */
        } else {
            // The new value for a rebase is the low 51-bits of the threaded value plus the slide.
            // Regular pointer which needs to fit in 51-bits of value.
            // C++ RTTI uses the top bit, so we'll allow the whole top-byte
            // and the bottom 43-bits to be fit in to 51-bits.
            uint64_t top8Bits = value & 0x0007F80000000000ULL;
            uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;
            uint64_t targetValue = (top8Bits << 13) | bottom43Bits;
            value = targetValue + slide_amount;
        }

        memcpy(rebaseLocation, &value, sizeof(value));
    } while (delta != 0);

    return KERN_SUCCESS;
}
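
/*
 * Illustrative sketch only: the two 64-bit chained-pointer layouts described
 * in the comment inside vm_shared_region_slide_page_v3(), rendered as
 * bit-fields.  The union and field names are assumptions modeled on dyld's
 * published dyld_cache_slide_pointer3 layout; they are not types used by the
 * code above, which extracts the same fields with shifts and masks.
 */
union example_slide_pointer_v3 {
    uint64_t raw;
    struct {
        uint64_t pointerValue        : 51;
        uint64_t offsetToNextPointer : 11;
        uint64_t isBind              : 1;   /* 0 for this layout */
        uint64_t authenticated       : 1;   /* 0 for this layout */
    } plain;
    struct {
        uint64_t offsetFromSharedCacheBase : 32;
        uint64_t diversityData             : 16;
        uint64_t hasAddressDiversity       : 1;
        uint64_t hasDKey                   : 1;  /* read together with hasBKey */
        uint64_t hasBKey                   : 1;  /* as the 2-bit ptrauth key above */
        uint64_t offsetToNextPointer       : 11;
        uint64_t isBind                    : 1;
        uint64_t authenticated             : 1;  /* 1 for this layout */
    } auth;
};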
static kern_return_t
rebase_chainv4(
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v4_t s_info)
{
    const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

    const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
    const uint32_t value_mask = ~delta_mask;
    const uint32_t value_add = (uint32_t)(s_info->value_add);
    const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

    uint32_t page_offset = start_offset;
    uint32_t delta = 1;

    while (delta != 0 && page_offset <= last_page_offset) {
        uint8_t *loc;
        uint32_t value;

        loc = page_content + page_offset;
        memcpy(&value, loc, sizeof(value));
        delta = (value & delta_mask) >> delta_shift;
        value &= value_mask;

        if ((value & 0xFFFF8000) == 0) {
            // small positive non-pointer, use as-is
        } else if ((value & 0x3FFF8000) == 0x3FFF8000) {
            // small negative non-pointer
            value |= 0xC0000000;
        } else {
            // pointer that needs rebasing
            value += value_add;
            value += slide_amount;
        }
        memcpy(loc, &value, sizeof(value));
        page_offset += delta;
    }

    /* If the offset went past the end of the page, then the slide data is invalid. */
    if (page_offset > last_page_offset) {
        return KERN_FAILURE;
    }
    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
    vm_shared_region_slide_info_entry_v4_t s_info = &si->si_slide_info_entry->v4;
    const uint32_t slide_amount = si->si_slide;

    const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
    const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

    uint8_t *page_content = (uint8_t *)vaddr;
    uint16_t page_entry;

    if (pageIndex >= s_info->page_starts_count) {
        printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
            pageIndex, s_info->page_starts_count);
        return KERN_FAILURE;
    }
    page_entry = page_starts[pageIndex];

    if (page_entry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE) {
        return KERN_SUCCESS;
    }

    if (page_entry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) {
        uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE4_PAGE_INDEX;
        uint16_t info;

        do {
            uint16_t page_start_offset;
            kern_return_t kr;

            if (chain_index >= s_info->page_extras_count) {
                printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
                    chain_index, s_info->page_extras_count);
                return KERN_FAILURE;
            }
            info = page_extras[chain_index];
            page_start_offset = (uint16_t)((info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);

            kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
            if (kr != KERN_SUCCESS) {
                return KERN_FAILURE;
            }

            chain_index++;
        } while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END));
    } else {
        const uint16_t page_start_offset = (uint16_t)(page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);
        kern_return_t kr;

        kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
        if (kr != KERN_SUCCESS) {
            return KERN_FAILURE;
        }
    }

    return KERN_SUCCESS;
}
kern_return_t
vm_shared_region_slide_page(
    vm_shared_region_slide_info_t si,
    vm_offset_t vaddr,
    mach_vm_offset_t uservaddr,
    uint32_t pageIndex,
    uint64_t jop_key)
{
    switch (si->si_slide_info_entry->version) {
    case 2:
        return vm_shared_region_slide_page_v2(si, vaddr, pageIndex);
    case 3:
        return vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex, jop_key);
    case 4:
        return vm_shared_region_slide_page_v4(si, vaddr, pageIndex);
    default:
        return KERN_FAILURE;
    }
}
/******************************************************************************/
/* Comm page support                                                          */
/******************************************************************************/

SECURITY_READ_ONLY_LATE(ipc_port_t) commpage32_handle = IPC_PORT_NULL;
SECURITY_READ_ONLY_LATE(ipc_port_t) commpage64_handle = IPC_PORT_NULL;
SECURITY_READ_ONLY_LATE(vm_named_entry_t) commpage32_entry = NULL;
SECURITY_READ_ONLY_LATE(vm_named_entry_t) commpage64_entry = NULL;
SECURITY_READ_ONLY_LATE(vm_map_t) commpage32_map = VM_MAP_NULL;
SECURITY_READ_ONLY_LATE(vm_map_t) commpage64_map = VM_MAP_NULL;

SECURITY_READ_ONLY_LATE(ipc_port_t) commpage_text32_handle = IPC_PORT_NULL;
SECURITY_READ_ONLY_LATE(ipc_port_t) commpage_text64_handle = IPC_PORT_NULL;
SECURITY_READ_ONLY_LATE(vm_named_entry_t) commpage_text32_entry = NULL;
SECURITY_READ_ONLY_LATE(vm_named_entry_t) commpage_text64_entry = NULL;
SECURITY_READ_ONLY_LATE(vm_map_t) commpage_text32_map = VM_MAP_NULL;
SECURITY_READ_ONLY_LATE(vm_map_t) commpage_text64_map = VM_MAP_NULL;

SECURITY_READ_ONLY_LATE(user32_addr_t) commpage_text32_location = 0;
SECURITY_READ_ONLY_LATE(user64_addr_t) commpage_text64_location = 0;
#if defined(__i386__) || defined(__x86_64__)
/*
 * Create a memory entry, VM submap and pmap for one commpage.
 */
static void
_vm_commpage_init(
    ipc_port_t *handlep,
    vm_map_size_t size)
{
    kern_return_t kr;
    vm_named_entry_t mem_entry;
    vm_map_t new_map;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> _init(0x%llx)\n",
        (long long)size));

    kr = mach_memory_entry_allocate(&mem_entry,
        handlep);
    if (kr != KERN_SUCCESS) {
        panic("_vm_commpage_init: could not allocate mem_entry");
    }
    new_map = vm_map_create(pmap_create_options(NULL, 0, 0), 0, size, PMAP_CREATE_64BIT);
    if (new_map == VM_MAP_NULL) {
        panic("_vm_commpage_init: could not allocate VM map");
    }
    mem_entry->backing.map = new_map;
    mem_entry->internal = TRUE;
    mem_entry->is_sub_map = TRUE;
    mem_entry->offset = 0;
    mem_entry->protection = VM_PROT_ALL;
    mem_entry->size = size;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: _init(0x%llx) <- %p\n",
        (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep)));
}
#endif /* __i386__ || __x86_64__ */
/*
 * Initialize the comm text pages at boot time
 */
void
vm_commpage_text_init(void)
{
    SHARED_REGION_TRACE_DEBUG(
        ("commpage text: ->init()\n"));
#if defined(__i386__) || defined(__x86_64__)
    /* create the 32 bit comm text page */
    unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restricting to 32bMAX-2PAGE */
    _vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
    commpage_text32_entry = (vm_named_entry_t) ip_get_kobject(commpage_text32_handle);
    commpage_text32_map = commpage_text32_entry->backing.map;
    commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
    /* XXX if (cpu_is_64bit_capable()) ? */
    /* create the 64-bit comm page */
    offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding up to 2Mb range */
    _vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
    commpage_text64_entry = (vm_named_entry_t) ip_get_kobject(commpage_text64_handle);
    commpage_text64_map = commpage_text64_entry->backing.map;
    commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);

    commpage_text_populate();
#endif /* __i386__ || __x86_64__ */
    /* populate the routines in here */
    SHARED_REGION_TRACE_DEBUG(
        ("commpage text: init() <-\n"));
}
/*
 * Initialize the comm pages at boot time.
 */
void
vm_commpage_init(void)
{
    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> init()\n"));

#if defined(__i386__) || defined(__x86_64__)
    /* create the 32-bit comm page */
    _vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
    commpage32_entry = (vm_named_entry_t) ip_get_kobject(commpage32_handle);
    commpage32_map = commpage32_entry->backing.map;

    /* XXX if (cpu_is_64bit_capable()) ? */
    /* create the 64-bit comm page */
    _vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
    commpage64_entry = (vm_named_entry_t) ip_get_kobject(commpage64_handle);
    commpage64_map = commpage64_entry->backing.map;

#endif /* __i386__ || __x86_64__ */

    /* populate them according to this specific platform */
    commpage_populate();
    __commpage_setup = 1;
#if XNU_TARGET_OS_OSX
    if (__system_power_source == 0) {
        post_sys_powersource_internal(0, 1);
    }
#endif /* XNU_TARGET_OS_OSX */

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: init() <-\n"));
}
/*
 * Enter the appropriate comm page into the task's address space.
 * This is called at exec() time via vm_map_exec().
 */
kern_return_t
vm_commpage_enter(
    vm_map_t map,
    task_t task,
    boolean_t is64bit)
{
#if defined(__arm__)
#pragma unused(is64bit)
    (void)task;
    (void)map;
    return KERN_SUCCESS;
#elif defined(__arm64__)
#pragma unused(is64bit)
    (void)task;
    pmap_insert_sharedpage(vm_map_pmap(map));
    return KERN_SUCCESS;
#else
    ipc_port_t commpage_handle, commpage_text_handle;
    vm_map_offset_t commpage_address, objc_address, commpage_text_address;
    vm_map_size_t commpage_size, objc_size, commpage_text_size;
    int vm_flags;
    vm_map_kernel_flags_t vmk_flags;
    kern_return_t kr;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> enter(%p,%p)\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task)));

    commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
    /* the comm page is likely to be beyond the actual end of the VM map */
    vm_flags = VM_FLAGS_FIXED;
    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    vmk_flags.vmkf_beyond_max = TRUE;

    /* select the appropriate comm page for this task */
    assert(!(is64bit ^ vm_map_is_64bit(map)));
    if (is64bit) {
        commpage_handle = commpage64_handle;
        commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
        commpage_size = _COMM_PAGE64_AREA_LENGTH;
        objc_size = _COMM_PAGE64_OBJC_SIZE;
        objc_address = _COMM_PAGE64_OBJC_BASE;
        commpage_text_handle = commpage_text64_handle;
        commpage_text_address = (vm_map_offset_t) commpage_text64_location;
    } else {
        commpage_handle = commpage32_handle;
        commpage_address =
            (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
        commpage_size = _COMM_PAGE32_AREA_LENGTH;
        objc_size = _COMM_PAGE32_OBJC_SIZE;
        objc_address = _COMM_PAGE32_OBJC_BASE;
        commpage_text_handle = commpage_text32_handle;
        commpage_text_address = (vm_map_offset_t) commpage_text32_location;
    }

    vm_tag_t tag = VM_KERN_MEMORY_NONE;
    if ((commpage_address & (pmap_commpage_size_min(map->pmap) - 1)) == 0 &&
        (commpage_size & (pmap_commpage_size_min(map->pmap) - 1)) == 0) {
        /* the commpage is properly aligned or sized for pmap-nesting */
        tag = VM_MEMORY_SHARED_PMAP;
        vmk_flags.vmkf_nested_pmap = TRUE;
    }
    /* map the comm page in the task's address space */
    assert(commpage_handle != IPC_PORT_NULL);
    kr = vm_map_enter_mem_object(
        map,
        &commpage_address,
        commpage_size,
        0,
        vm_flags,
        vmk_flags,
        tag,
        commpage_handle,
        0,
        FALSE,
        VM_PROT_READ,
        VM_PROT_READ,
        VM_INHERIT_SHARE);
    if (kr != KERN_SUCCESS) {
        SHARED_REGION_TRACE_ERROR(
            ("commpage: enter(%p,0x%llx,0x%llx) "
            "commpage %p mapping failed 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (long long)commpage_address,
            (long long)commpage_size,
            (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr));
    }

    /* map the comm text page in the task's address space */
    assert(commpage_text_handle != IPC_PORT_NULL);
    kr = vm_map_enter_mem_object(
        map,
        &commpage_text_address,
        commpage_text_size,
        0,
        vm_flags,
        vmk_flags,
        tag,
        commpage_text_handle,
        0,
        FALSE,
        VM_PROT_READ | VM_PROT_EXECUTE,
        VM_PROT_READ | VM_PROT_EXECUTE,
        VM_INHERIT_SHARE);
    if (kr != KERN_SUCCESS) {
        SHARED_REGION_TRACE_ERROR(
            ("commpage text: enter(%p,0x%llx,0x%llx) "
            "commpage text %p mapping failed 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (long long)commpage_text_address,
            (long long)commpage_text_size,
            (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr));
    }

    /*
     * Since we're here, we also pre-allocate some virtual space for the
     * Objective-C run-time, if needed...
     */
    if (objc_size != 0) {
        kr = vm_map_enter_mem_object(
            map,
            &objc_address,
            objc_size,
            0,
            VM_FLAGS_FIXED,
            vmk_flags,
            tag,
            IPC_PORT_NULL,
            0,
            FALSE,
            VM_PROT_ALL,
            VM_PROT_ALL,
            VM_INHERIT_DEFAULT);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("commpage: enter(%p,0x%llx,0x%llx) "
                "objc mapping failed 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (long long)objc_address,
                (long long)objc_size, kr));
        }
    }

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: enter(%p,%p) <- 0x%x\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task), kr));
    return kr;
#endif
}
kern_return_t
vm_shared_region_slide(
    uint32_t slide,
    mach_vm_offset_t entry_start_address,
    mach_vm_size_t entry_size,
    mach_vm_offset_t slide_start,
    mach_vm_size_t slide_size,
    mach_vm_offset_t slid_mapping,
    memory_object_control_t sr_file_control,
    vm_prot_t prot)
{
    vm_shared_region_t sr;
    kern_return_t error;

    SHARED_REGION_TRACE_DEBUG(
        ("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
        slide, entry_start_address, entry_size, slide_start, slide_size));

    sr = vm_shared_region_get(current_task());
    if (sr == NULL) {
        printf("%s: no shared region?\n", __FUNCTION__);
        SHARED_REGION_TRACE_DEBUG(
            ("vm_shared_region_slide: <- %d (no shared region)\n",
            KERN_FAILURE));
        return KERN_FAILURE;
    }

    /*
     * Protect from concurrent access.
     */
    vm_shared_region_lock();
    while (sr->sr_slide_in_progress) {
        vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
    }

    sr->sr_slide_in_progress = TRUE;
    vm_shared_region_unlock();

    error = vm_shared_region_slide_mapping(sr,
        (user_addr_t)slide_start,
        slide_size,
        entry_start_address,
        entry_size,
        slid_mapping,
        slide,
        sr_file_control,
        prot);
    if (error) {
        printf("slide_info initialization failed with kr=%d\n", error);
    }

    vm_shared_region_lock();

    assert(sr->sr_slide_in_progress);
    sr->sr_slide_in_progress = FALSE;
    thread_wakeup(&sr->sr_slide_in_progress);

#if XNU_TARGET_OS_OSX
    if (error == KERN_SUCCESS) {
        shared_region_completed_slide = TRUE;
    }
#endif /* XNU_TARGET_OS_OSX */
    vm_shared_region_unlock();

    vm_shared_region_deallocate(sr);

    SHARED_REGION_TRACE_DEBUG(
        ("vm_shared_region_slide: <- %d\n",
        error));

    return error;
}
/*
 * Used during Authenticated Root Volume macOS boot.
 * Launchd re-execs itself and wants the new launchd to use
 * the shared cache from the new root volume. This call
 * makes all the existing shared caches stale to allow
 * that to happen.
 */
void
vm_shared_region_pivot(void)
{
    vm_shared_region_t shared_region = NULL;

    vm_shared_region_lock();

    queue_iterate(&vm_shared_region_queue, shared_region, vm_shared_region_t, sr_q) {
        assert(shared_region->sr_ref_count > 0);
        shared_region->sr_stale = TRUE;
        if (shared_region->sr_timer_call) {
            /*
             * We have a shared region ready to be destroyed
             * and just waiting for a delayed timer to fire.
             * Marking it stale cements its ineligibility to
             * be used ever again. So let's shorten the timer
             * aggressively down to 10 milliseconds and get rid of it.
             * This is a single quantum and we don't need to go
             * shorter than this duration. We want it to be short
             * enough, however, because we could have an unmount
             * of the volume hosting this shared region just behind
             * us.
             */
            uint64_t deadline;
            assert(shared_region->sr_ref_count == 1);

            /*
             * Free the old timer call. Returns with a reference held.
             * If the old timer has fired and is waiting for the vm_shared_region_lock
             * lock, we will just return with an additional ref_count i.e. 2.
             * The old timer will then fire and just drop the ref count down to 1
             * with no other modifications.
             */
            vm_shared_region_reference_locked(shared_region);

            /* set up the timer. Keep the reference from above for this timer.*/
            shared_region->sr_timer_call = thread_call_allocate(
                (thread_call_func_t) vm_shared_region_timeout,
                (thread_call_param_t) shared_region);

            /* schedule the timer */
            clock_interval_to_deadline(10, /* 10 milliseconds */
                NSEC_PER_MSEC,
                &deadline);
            thread_call_enter_delayed(shared_region->sr_timer_call,
                deadline);

            SHARED_REGION_TRACE_DEBUG(
                ("shared_region: pivot(%p): armed timer\n",
                (void *)VM_KERNEL_ADDRPERM(shared_region)));
        }
    }

    vm_shared_region_unlock();
}
/*
 * Routine to mark any non-standard slide shared cache region as stale.
 * This causes the next "reslide" spawn to create a new shared region.
 */
void
vm_shared_region_reslide_stale(void)
{
#if __has_feature(ptrauth_calls)
    vm_shared_region_t shared_region = NULL;

    vm_shared_region_lock();

    queue_iterate(&vm_shared_region_queue, shared_region, vm_shared_region_t, sr_q) {
        assert(shared_region->sr_ref_count > 0);
        if (!shared_region->sr_stale && shared_region->sr_reslide) {
            shared_region->sr_stale = TRUE;
            vm_shared_region_reslide_count++;
        }
    }

    vm_shared_region_unlock();
#endif /* __has_feature(ptrauth_calls) */
}
/*
 * Report whether the task is using a reslide shared cache region.
 */
bool
vm_shared_region_is_reslide(__unused struct task *task)
{
    bool is_reslide = FALSE;
#if !XNU_TARGET_OS_OSX && __has_feature(ptrauth_calls)
    vm_shared_region_t sr = vm_shared_region_get(task);

    if (sr != NULL) {
        is_reslide = sr->sr_reslide;
        vm_shared_region_deallocate(sr);
    }
#endif /* !XNU_TARGET_OS_OSX && __has_feature(ptrauth_calls) */
    return is_reslide;
}
/*
 * This is called from power-management code to let the kernel know the current source of power:
 * 0 if it is an external source (connected to power)
 * 1 if it is an internal power source, i.e. battery
 */
void
#if XNU_TARGET_OS_OSX
post_sys_powersource(int i)
#else /* XNU_TARGET_OS_OSX */
post_sys_powersource(__unused int i)
#endif /* XNU_TARGET_OS_OSX */
{
#if XNU_TARGET_OS_OSX
    post_sys_powersource_internal(i, 0);
#endif /* XNU_TARGET_OS_OSX */
}


#if XNU_TARGET_OS_OSX
static void
post_sys_powersource_internal(int i, int internal)
{
    if (internal == 0) {
        __system_power_source = i;
    }
}
#endif /* XNU_TARGET_OS_OSX */
void *
vm_shared_region_root_dir(
    struct vm_shared_region *sr)
{
    void *vnode;

    vm_shared_region_lock();
    vnode = sr->sr_root_dir;
    vm_shared_region_unlock();