/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * Shared region (... and comm page)
 *
 * This file handles the VM shared region and comm page.
 *
 * SHARED REGION:
 *
 * A shared region is a submap that contains the most common system shared
 * libraries for a given environment, which is defined by:
 * - cpu-type
 * - 64-bitness
 * - root directory
 * - Team ID - when we have pointer authentication.
 *
 * The point of a shared region is to reduce the setup overhead when exec'ing
 * a new process. A shared region uses a shared VM submap that gets mapped
 * automatically at exec() time, see vm_map_exec(). The first process of a given
 * environment sets up the shared region and all further processes in that
 * environment can re-use that shared region without having to re-create
 * the same mappings in their VM map. All they need is contained in the shared
 * region.
 *
 * The region can also share a pmap (mostly for read-only parts but also for the
 * initial version of some writable parts), which gets "nested" into the
 * process's pmap. This reduces the number of soft faults: once one process
 * brings in a page in the shared region, all the other processes can access
 * it without having to enter it in their own pmap.
 *
 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
 * to map the appropriate shared region in the process's address space.
 * We look up the appropriate shared region for the process's environment.
 * If we can't find one, we create a new (empty) one and add it to the list.
 * Otherwise, we just take an extra reference on the shared region we found.
 *
 * The "dyld" runtime, mapped into the process's address space at exec() time,
 * will then use the shared_region_check_np() and shared_region_map_and_slide_np()
 * system calls to validate and/or populate the shared region with the
 * appropriate dyld_shared_cache file.
 *
 * The shared region is inherited on fork() and the child simply takes an
 * extra reference on its parent's shared region.
 *
 * When the task terminates, we release the reference on its shared region.
 * When the last reference is released, we destroy the shared region.
 *
 * After a chroot(), the calling process keeps using its original shared region,
 * since that's what was mapped when it was started. But its children
 * will use a different shared region, because they need to use the shared
 * cache that's relative to the new root directory.
 *
 * COMM PAGE:
 *
 * A "comm page" is an area of memory that is populated by the kernel with
 * the appropriate platform-specific version of some commonly used code.
 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
 * for the native cpu-type. No need to overly optimize translated code
 * for hardware that is not really there!
 *
 * The comm pages are created and populated at boot time.
 *
 * The appropriate comm page is mapped into a process's address space
 * at exec() time, in vm_map_exec(). It is then inherited on fork().
 *
 * The comm page is shared between the kernel and all applications of
 * a given platform. Only the kernel can modify it.
 *
 * Applications just branch to fixed addresses in the comm page and find
 * the right version of the code for the platform. There is also some
 * data provided and updated by the kernel for processes to retrieve easily
 * without having to do a system call.
 */
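
/*
 * Illustrative sketch (not part of the original file): the comm page data
 * described above can be read by user space without a system call, because
 * the page sits at a fixed address in every process. The _COMM_PAGE_NCPUS
 * offset comes from <machine/cpu_capabilities.h>; treat the exact field
 * chosen here as an assumption made for illustration only.
 */
#if 0	/* example only, never built as part of this file */
#include <machine/cpu_capabilities.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* read a kernel-maintained value straight out of the comm page */
	uint8_t ncpus = *(volatile uint8_t *)(uintptr_t)_COMM_PAGE_NCPUS;

	printf("kernel-published CPU count: %u\n", ncpus);
	return 0;
}
#endif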

#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>

#include <vm/vm_protos.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>
#include <sys/random.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#endif /* defined(__arm__) || defined(__arm64__) */

/*
 * the following codes are used in the subclass
 * of the DBG_MACH_SHAREDREGION class
 */
#define PROCESS_SHARED_CACHE_LAYOUT 0x00

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif /* __has_feature(ptrauth_calls) */

/* "dyld" uses this to figure out what the kernel supports */
int shared_region_version = 3;

/* trace level, output is sent to the system log file */
int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;

/* should local (non-chroot) shared regions persist when no task uses them? */
int shared_region_persistence = 0;	/* no by default */

/* delay in seconds before reclaiming an unused shared region */
TUNABLE_WRITEABLE(int, shared_region_destroy_delay, "vm_shared_region_destroy_delay", 120);

struct vm_shared_region *init_task_shared_region = NULL;

#ifndef CONFIG_EMBEDDED
/*
 * Only one cache gets to slide on Desktop, since we can't
 * tear down slide info properly today and the desktop actually
 * produces lots of shared caches.
 */
boolean_t shared_region_completed_slide = FALSE;
#endif /* CONFIG_EMBEDDED */

/* this lock protects all the shared region data structures */
static LCK_GRP_DECLARE(vm_shared_region_lck_grp, "vm shared region");
static LCK_MTX_DECLARE(vm_shared_region_lock, &vm_shared_region_lck_grp);

#define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
#define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
#define vm_shared_region_sleep(event, interruptible)		\
	lck_mtx_sleep(&vm_shared_region_lock,			\
	    LCK_SLEEP_DEFAULT,					\
	    (event_t) (event),					\
	    (interruptible))
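
/*
 * Illustrative sketch (not part of the original file): the wait/wakeup
 * protocol that the functions below build on top of these macros whenever
 * they need exclusive, sleepable access to one shared region (see the
 * "sr_mapping_in_progress" flag).
 */
#if 0	/* example only */
	vm_shared_region_lock();
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
		    THREAD_UNINT);
	}
	shared_region->sr_mapping_in_progress = TRUE;	/* claim the region */
	vm_shared_region_unlock();

	/* ... long-running work, done without holding the mutex ... */

	vm_shared_region_lock();
	shared_region->sr_mapping_in_progress = FALSE;
	thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
	vm_shared_region_unlock();
#endif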

/* the list of currently available shared regions (one per environment) */
queue_head_t	vm_shared_region_queue = QUEUE_HEAD_INITIALIZER(vm_shared_region_queue);
int		vm_shared_region_count = 0;
int		vm_shared_region_peak = 0;

/*
 * the number of times an event has forced the recalculation of the reslide
 * shared region slide.
 */
#if __has_feature(ptrauth_calls)
int		vm_shared_region_reslide_count = 0;
#endif /* __has_feature(ptrauth_calls) */

static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
static vm_shared_region_t vm_shared_region_create(
	void			*root_dir,
	cpu_type_t		cputype,
	cpu_subtype_t		cpu_subtype,
	boolean_t		is_64bit,
	boolean_t		reslide);
static void vm_shared_region_destroy(vm_shared_region_t shared_region);

static kern_return_t vm_shared_region_slide_sanity_check(vm_shared_region_slide_info_entry_t entry, mach_vm_size_t size);
static void vm_shared_region_timeout(thread_call_param_t param0,
    thread_call_param_t param1);
static kern_return_t vm_shared_region_slide_mapping(
	vm_shared_region_t	sr,
	user_addr_t		slide_info_addr,
	mach_vm_size_t		slide_info_size,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	mach_vm_offset_t	slid_mapping,
	uint32_t		slide,
	memory_object_control_t,
	vm_prot_t		prot); /* forward */

static int __commpage_setup = 0;
static int __system_power_source = 1;	/* init to external power source */
static void post_sys_powersource_internal(int i, int internal);

extern u_int32_t random(void);

/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 */
vm_shared_region_t
vm_shared_region_get(
	task_t			task)
{
	vm_shared_region_t	shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> get(%p)\n",
		(void *)VM_KERNEL_ADDRPERM(task)));

	vm_shared_region_lock();
	shared_region = task->shared_region;
	if (shared_region != NULL) {
		assert(shared_region->sr_ref_count > 0);
		vm_shared_region_reference_locked(shared_region);
	}
	vm_shared_region_unlock();

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: get(%p) <- %p\n",
		(void *)VM_KERNEL_ADDRPERM(task),
		(void *)VM_KERNEL_ADDRPERM(shared_region)));

	return shared_region;
}
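
/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to pair vm_shared_region_get() with vm_shared_region_deallocate(),
 * consuming the extra reference once it is done with the region. The helper
 * name below is hypothetical.
 */
#if 0	/* example only */
static void
example_inspect_shared_region(task_t task)
{
	vm_shared_region_t	sr;

	sr = vm_shared_region_get(task);
	if (sr == NULL) {
		return;
	}
	/* the extra reference keeps "sr" alive while we look at it */
	printf("shared region base 0x%llx size 0x%llx\n",
	    (long long)vm_shared_region_base_address(sr),
	    (long long)vm_shared_region_size(sr));
	vm_shared_region_deallocate(sr);	/* consume the extra reference */
}
#endif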

/*
 * Get the base address of the shared region.
 * That's the address at which it needs to be mapped in the process's
 * address space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_offset_t
vm_shared_region_base_address(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> base_address(%p)\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: base_address(%p) <- 0x%llx\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region),
		(long long)shared_region->sr_base_address));
	return shared_region->sr_base_address;
}

/*
 * Get the size of the shared region.
 * That's the size that needs to be mapped in the process's
 * address space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_size_t
vm_shared_region_size(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> size(%p)\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: size(%p) <- 0x%llx\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region),
		(long long)shared_region->sr_size));
	return shared_region->sr_size;
}

/*
 * Get the memory entry of the shared region.
 * That's the "memory object" that needs to be mapped in the process's
 * address space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
ipc_port_t
vm_shared_region_mem_entry(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> mem_entry(%p)\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: mem_entry(%p) <- %p\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region),
		(void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry)));
	return shared_region->sr_mem_entry;
}

vm_map_t
vm_shared_region_vm_map(
	vm_shared_region_t	shared_region)
{
	ipc_port_t		sr_handle;
	vm_named_entry_t	sr_mem_entry;
	vm_map_t		sr_map;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> vm_map(%p)\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);

	sr_handle = shared_region->sr_mem_entry;
	sr_mem_entry = (vm_named_entry_t) ip_get_kobject(sr_handle);
	sr_map = sr_mem_entry->backing.map;
	assert(sr_mem_entry->is_sub_map);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: vm_map(%p) <- %p\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region),
		(void *)VM_KERNEL_ADDRPERM(sr_map)));
	return sr_map;
}

/*
 * Set the shared region the process should use.
 * A NULL new shared region means that we just want to release the old
 * shared region.
 * The caller should already have an extra reference on the new shared region
 * (if any). We release a reference on the old shared region (if any).
 */
void
vm_shared_region_set(
	task_t			task,
	vm_shared_region_t	new_shared_region)
{
	vm_shared_region_t	old_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> set(%p, %p)\n",
		(void *)VM_KERNEL_ADDRPERM(task),
		(void *)VM_KERNEL_ADDRPERM(new_shared_region)));

	vm_shared_region_lock();

	old_shared_region = task->shared_region;
	if (new_shared_region) {
		assert(new_shared_region->sr_ref_count > 0);
	}

	task->shared_region = new_shared_region;

	vm_shared_region_unlock();

	if (old_shared_region) {
		assert(old_shared_region->sr_ref_count > 0);
		vm_shared_region_deallocate(old_shared_region);
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: set(%p) <- old=%p new=%p\n",
		(void *)VM_KERNEL_ADDRPERM(task),
		(void *)VM_KERNEL_ADDRPERM(old_shared_region),
		(void *)VM_KERNEL_ADDRPERM(new_shared_region)));
}

/*
 * Look up the shared region for the desired environment.
 * If none is found, create a new (empty) one.
 * Grab an extra reference on the returned shared region, to make sure
 * it doesn't get destroyed before the caller is done with it. The caller
 * is responsible for consuming that extra reference if necessary.
 */
vm_shared_region_t
vm_shared_region_lookup(
	void		*root_dir,
	cpu_type_t	cputype,
	cpu_subtype_t	cpu_subtype,
	boolean_t	is_64bit,
	boolean_t	reslide)
{
	vm_shared_region_t	shared_region;
	vm_shared_region_t	new_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> lookup(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d)\n",
		(void *)VM_KERNEL_ADDRPERM(root_dir),
		cputype, cpu_subtype, is_64bit, reslide));

	shared_region = NULL;
	new_shared_region = NULL;

	vm_shared_region_lock();
	for (;;) {
		queue_iterate(&vm_shared_region_queue,
		    shared_region,
		    vm_shared_region_t,
		    sr_q) {
			assert(shared_region->sr_ref_count > 0);
			if (shared_region->sr_cpu_type == cputype &&
#if !__has_feature(ptrauth_calls) /* arm64e/arm64 use same region */
			    shared_region->sr_cpu_subtype == cpu_subtype &&
#endif /* !__has_feature(ptrauth_calls) */
			    shared_region->sr_root_dir == root_dir &&
			    shared_region->sr_64bit == is_64bit &&
#if __has_feature(ptrauth_calls)
			    shared_region->sr_reslide == reslide &&
#endif /* __has_feature(ptrauth_calls) */
			    !shared_region->sr_stale) {
				/* found a match ! */
				vm_shared_region_reference_locked(shared_region);
				goto done;
			}
		}
		if (new_shared_region == NULL) {
			/* no match: create a new one */
			vm_shared_region_unlock();
			new_shared_region = vm_shared_region_create(root_dir,
			    cputype,
			    cpu_subtype,
			    is_64bit,
			    reslide);
			/* do the lookup again, in case we lost a race */
			vm_shared_region_lock();
		} else {
			/* still no match: use our new one */
			shared_region = new_shared_region;
			new_shared_region = NULL;
			queue_enter(&vm_shared_region_queue,
			    shared_region,
			    vm_shared_region_t,
			    sr_q);
			vm_shared_region_count++;
			if (vm_shared_region_count > vm_shared_region_peak) {
				vm_shared_region_peak = vm_shared_region_count;
			}
			break;
		}
	}

done:
	vm_shared_region_unlock();

	if (new_shared_region) {
		/*
		 * We lost a race with someone else to create a new shared
		 * region for that environment. Get rid of our unused one.
		 */
		assert(new_shared_region->sr_ref_count == 1);
		new_shared_region->sr_ref_count--;
		vm_shared_region_destroy(new_shared_region);
		new_shared_region = NULL;
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: lookup(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d) <- %p\n",
		(void *)VM_KERNEL_ADDRPERM(root_dir),
		cputype, cpu_subtype, is_64bit, reslide,
		(void *)VM_KERNEL_ADDRPERM(shared_region)));

	assert(shared_region->sr_ref_count > 0);
	return shared_region;
}

/*
 * Take an extra reference on a shared region.
 * The vm_shared_region_lock should already be held by the caller.
 */
static void
vm_shared_region_reference_locked(
	vm_shared_region_t	shared_region)
{
	LCK_MTX_ASSERT(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> reference_locked(%p)\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count++;
	assert(shared_region->sr_ref_count != 0);

	if (shared_region->sr_timer_call != NULL) {
		boolean_t cancelled;

		/* cancel and free any pending timeout */
		cancelled = thread_call_cancel(shared_region->sr_timer_call);
		if (cancelled) {
			thread_call_free(shared_region->sr_timer_call);
			shared_region->sr_timer_call = NULL;
			/* release the reference held by the cancelled timer */
			shared_region->sr_ref_count--;
		} else {
			/* the timer will drop the reference and free itself */
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: reference_locked(%p) <- %d\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region),
		shared_region->sr_ref_count));
}

/*
 * Take a reference on a shared region.
 */
void
vm_shared_region_reference(vm_shared_region_t shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> reference(%p)\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region)));

	vm_shared_region_lock();
	vm_shared_region_reference_locked(shared_region);
	vm_shared_region_unlock();

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: reference(%p) <- %d\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region),
		shared_region->sr_ref_count));
}

/*
 * Release a reference on the shared region.
 * Destroy it if there are no references left.
 */
void
vm_shared_region_deallocate(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> deallocate(%p)\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region)));

	vm_shared_region_lock();

	assert(shared_region->sr_ref_count > 0);

	if (shared_region->sr_root_dir == NULL) {
		/*
		 * Local (i.e. based on the boot volume) shared regions
		 * can persist or not based on the "shared_region_persistence"
		 * sysctl.
		 * Make sure that this one complies.
		 *
		 * See comments in vm_shared_region_slide() for notes about
		 * shared regions we have slid (which are not torn down currently).
		 */
		if (shared_region_persistence &&
		    !shared_region->sr_persists) {
			/* make this one persistent */
			shared_region->sr_ref_count++;
			shared_region->sr_persists = TRUE;
		} else if (!shared_region_persistence &&
		    shared_region->sr_persists) {
			/* make this one no longer persistent */
			assert(shared_region->sr_ref_count > 1);
			shared_region->sr_ref_count--;
			shared_region->sr_persists = FALSE;
		}
	}

	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count--;
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p): ref now %d\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region),
		shared_region->sr_ref_count));

	if (shared_region->sr_ref_count == 0) {
		uint64_t deadline;

		/*
		 * Even though a shared region is unused, delay a while before
		 * tearing it down, in case a new app launch can use it.
		 */
		if (shared_region->sr_timer_call == NULL &&
		    shared_region_destroy_delay != 0 &&
		    !shared_region->sr_stale) {
			/* hold one reference for the timer */
			assert(!shared_region->sr_mapping_in_progress);
			shared_region->sr_ref_count++;

			/* set up the timer */
			shared_region->sr_timer_call = thread_call_allocate(
				(thread_call_func_t) vm_shared_region_timeout,
				(thread_call_param_t) shared_region);

			/* schedule the timer */
			clock_interval_to_deadline(shared_region_destroy_delay,
			    NSEC_PER_SEC,
			    &deadline);
			thread_call_enter_delayed(shared_region->sr_timer_call,
			    deadline);

			SHARED_REGION_TRACE_DEBUG(
				("shared_region: deallocate(%p): armed timer\n",
				(void *)VM_KERNEL_ADDRPERM(shared_region)));

			vm_shared_region_unlock();
		} else {
			/* timer expired: let go of this shared region */

			/*
			 * Remove it from the queue first, so no one can find
			 * it...
			 */
			queue_remove(&vm_shared_region_queue,
			    shared_region,
			    vm_shared_region_t,
			    sr_q);
			vm_shared_region_count--;
			vm_shared_region_unlock();

			/* ... and destroy it */
			vm_shared_region_destroy(shared_region);
			shared_region = NULL;
		}
	} else {
		vm_shared_region_unlock();
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p) <-\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region)));
}

static void
vm_shared_region_timeout(
	thread_call_param_t	param0,
	__unused thread_call_param_t	param1)
{
	vm_shared_region_t	shared_region;

	shared_region = (vm_shared_region_t) param0;

	vm_shared_region_deallocate(shared_region);
}

/*
 * Create a new (empty) shared region for a new environment.
 */
static vm_shared_region_t
vm_shared_region_create(
	void			*root_dir,
	cpu_type_t		cputype,
	cpu_subtype_t		cpu_subtype,
	boolean_t		is_64bit,
#if !__has_feature(ptrauth_calls)
	__unused
#endif /* __has_feature(ptrauth_calls) */
	boolean_t		reslide)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	ipc_port_t		mem_entry_port;
	vm_shared_region_t	shared_region;
	vm_map_t		sub_map;
	mach_vm_offset_t	base_address, pmap_nesting_start;
	mach_vm_size_t		size, pmap_nesting_size;

	SHARED_REGION_TRACE_INFO(
		("shared_region: -> create(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d)\n",
		(void *)VM_KERNEL_ADDRPERM(root_dir),
		cputype, cpu_subtype, is_64bit, reslide));

	base_address = 0;
	size = 0;
	mem_entry = NULL;
	mem_entry_port = IPC_PORT_NULL;
	sub_map = VM_MAP_NULL;

	/* create a new shared region structure... */
	shared_region = kalloc(sizeof(*shared_region));
	if (shared_region == NULL) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: couldn't allocate\n"));
		goto done;
	}

	/* figure out the correct settings for the desired environment */
	if (is_64bit) {
		switch (cputype) {
#if defined(__arm64__)
		case CPU_TYPE_ARM64:
			base_address = SHARED_REGION_BASE_ARM64;
			size = SHARED_REGION_SIZE_ARM64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM64;
			break;
#elif !defined(__arm__)
		case CPU_TYPE_X86_64:
			base_address = SHARED_REGION_BASE_X86_64;
			size = SHARED_REGION_SIZE_X86_64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC64;
			size = SHARED_REGION_SIZE_PPC64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
			break;
#endif
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				cputype));
			kfree(shared_region, sizeof(*shared_region));
			shared_region = NULL;
			goto done;
		}
	} else {
		switch (cputype) {
#if defined(__arm__) || defined(__arm64__)
		case CPU_TYPE_ARM:
			base_address = SHARED_REGION_BASE_ARM;
			size = SHARED_REGION_SIZE_ARM;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
			break;
#else
		case CPU_TYPE_I386:
			base_address = SHARED_REGION_BASE_I386;
			size = SHARED_REGION_SIZE_I386;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC;
			size = SHARED_REGION_SIZE_PPC;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
			break;
#endif
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				cputype));
			kfree(shared_region, sizeof(*shared_region));
			shared_region = NULL;
			goto done;
		}
	}

	/* create a memory entry structure and a Mach port handle */
	kr = mach_memory_entry_allocate(&mem_entry, &mem_entry_port);
	if (kr != KERN_SUCCESS) {
		kfree(shared_region, sizeof(*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: "
			"couldn't allocate mem_entry\n"));
		goto done;
	}

#if defined(__arm__) || defined(__arm64__)
	{
		struct pmap	*pmap_nested;
		unsigned int	pmap_flags = 0;
		pmap_flags |= is_64bit ? PMAP_CREATE_64BIT : 0;

		pmap_nested = pmap_create_options(NULL, 0, pmap_flags);
		if (pmap_nested != PMAP_NULL) {
			pmap_set_nested(pmap_nested);
			sub_map = vm_map_create(pmap_nested, 0, (vm_map_offset_t)size, TRUE);
#if defined(__arm64__)
			if (is_64bit ||
			    page_shift_user32 == SIXTEENK_PAGE_SHIFT) {
				/* enforce 16KB alignment of VM map entries */
				vm_map_set_page_shift(sub_map,
				    SIXTEENK_PAGE_SHIFT);
			}
#elif (__ARM_ARCH_7K__ >= 2)
			/* enforce 16KB alignment for watch targets with new ABI */
			vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT);
#endif /* __arm64__ */
		} else {
			sub_map = VM_MAP_NULL;
		}
	}
#else
	/* create a VM sub map and its pmap */
	sub_map = vm_map_create(pmap_create_options(NULL, 0, is_64bit), 0, size, TRUE);
#endif
	if (sub_map == VM_MAP_NULL) {
		ipc_port_release_send(mem_entry_port);
		kfree(shared_region, sizeof(*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(("shared_region: create: couldn't allocate map\n"));
		goto done;
	}

	/* shared regions should always enforce code-signing */
	vm_map_cs_enforcement_set(sub_map, true);
	assert(vm_map_cs_enforcement(sub_map));
	assert(pmap_get_vm_map_cs_enforced(vm_map_pmap(sub_map)));

	assert(!sub_map->disable_vmentry_reuse);
	sub_map->is_nested_map = TRUE;

	/* make the memory entry point to the VM sub map */
	mem_entry->is_sub_map = TRUE;
	mem_entry->backing.map = sub_map;
	mem_entry->size = size;
	mem_entry->protection = VM_PROT_ALL;

	/* make the shared region point at the memory entry */
	shared_region->sr_mem_entry = mem_entry_port;

	/* fill in the shared region's environment and settings */
	shared_region->sr_base_address = base_address;
	shared_region->sr_size = size;
	shared_region->sr_pmap_nesting_start = pmap_nesting_start;
	shared_region->sr_pmap_nesting_size = pmap_nesting_size;
	shared_region->sr_cpu_type = cputype;
	shared_region->sr_cpu_subtype = cpu_subtype;
	shared_region->sr_64bit = (uint8_t)is_64bit;
	shared_region->sr_root_dir = root_dir;

	queue_init(&shared_region->sr_q);
	shared_region->sr_mapping_in_progress = FALSE;
	shared_region->sr_slide_in_progress = FALSE;
	shared_region->sr_persists = FALSE;
	shared_region->sr_stale = FALSE;
	shared_region->sr_timer_call = NULL;
	shared_region->sr_first_mapping = (mach_vm_offset_t) -1;

	/* grab a reference for the caller */
	shared_region->sr_ref_count = 1;

	shared_region->sr_slide = 0; /* not slid yet */

	/* Initialize UUID and other metadata */
	memset(&shared_region->sr_uuid, '\0', sizeof(shared_region->sr_uuid));
	shared_region->sr_uuid_copied = FALSE;
	shared_region->sr_images_count = 0;
	shared_region->sr_images = NULL;
#if __has_feature(ptrauth_calls)
	shared_region->sr_reslide = reslide;
	shared_region->sr_num_auth_section = 0;
	for (uint_t i = 0; i < NUM_SR_AUTH_SECTIONS; ++i) {
		shared_region->sr_auth_section[i] = NULL;
	}
	shared_region->sr_num_auth_section = 0;
#endif /* __has_feature(ptrauth_calls) */

done:
	if (shared_region) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d"
			"base=0x%llx,size=0x%llx) <- "
			"%p mem=(%p,%p) map=%p pmap=%p\n",
			(void *)VM_KERNEL_ADDRPERM(root_dir),
			cputype, cpu_subtype, is_64bit, reslide,
			(long long)base_address,
			(long long)size,
			(void *)VM_KERNEL_ADDRPERM(shared_region),
			(void *)VM_KERNEL_ADDRPERM(mem_entry_port),
			(void *)VM_KERNEL_ADDRPERM(mem_entry),
			(void *)VM_KERNEL_ADDRPERM(sub_map),
			(void *)VM_KERNEL_ADDRPERM(sub_map->pmap)));
	} else {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,"
			"base=0x%llx,size=0x%llx) <- NULL",
			(void *)VM_KERNEL_ADDRPERM(root_dir),
			cputype, cpu_subtype, is_64bit,
			(long long)base_address,
			(long long)size));
	}
	return shared_region;
}

/*
 * Destroy a now-unused shared region.
 * The shared region is no longer in the queue and can not be looked up.
 */
static void
vm_shared_region_destroy(
	vm_shared_region_t	shared_region)
{
	vm_named_entry_t	mem_entry;
	vm_map_t		map;

	SHARED_REGION_TRACE_INFO(
		("shared_region: -> destroy(%p) (root=%p,cpu=<%d,%d>,64bit=%d)\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region),
		(void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir),
		shared_region->sr_cpu_type,
		shared_region->sr_cpu_subtype,
		shared_region->sr_64bit));

	assert(shared_region->sr_ref_count == 0);
	assert(!shared_region->sr_persists);

	mem_entry = (vm_named_entry_t) ip_get_kobject(shared_region->sr_mem_entry);
	assert(mem_entry->is_sub_map);
	assert(!mem_entry->internal);
	assert(!mem_entry->is_copy);
	map = mem_entry->backing.map;

	/*
	 * Clean up the pmap first. The virtual addresses that were
	 * entered in this possibly "nested" pmap may have different values
	 * than the VM map's min and max offsets, if the VM sub map was
	 * mapped at a non-zero offset in the processes' main VM maps, which
	 * is usually the case, so the clean-up we do in vm_map_destroy() would
	 * not be enough here.
	 */
	if (map->pmap) {
		pmap_remove(map->pmap,
		    (vm_map_offset_t)shared_region->sr_base_address,
		    (vm_map_offset_t)(shared_region->sr_base_address + shared_region->sr_size));
	}

	/*
	 * Release our (one and only) handle on the memory entry.
	 * This will generate a no-senders notification, which will be processed
	 * by ipc_kobject_notify(), which will release the one and only
	 * reference on the memory entry and cause it to be destroyed, along
	 * with the VM sub map and its pmap.
	 */
	mach_memory_entry_port_release(shared_region->sr_mem_entry);
	mem_entry = NULL;
	shared_region->sr_mem_entry = IPC_PORT_NULL;

	if (shared_region->sr_timer_call) {
		thread_call_free(shared_region->sr_timer_call);
	}

#if __has_feature(ptrauth_calls)
	/*
	 * Free the cached copies of slide_info for the AUTH regions.
	 */
	for (uint_t i = 0; i < shared_region->sr_num_auth_section; ++i) {
		vm_shared_region_slide_info_t si = shared_region->sr_auth_section[i];
		if (si != NULL) {
			vm_object_deallocate(si->si_slide_object);
			kheap_free(KHEAP_DATA_BUFFERS, si->si_slide_info_entry, si->si_slide_info_size);
			kfree(si, sizeof *si);
			shared_region->sr_auth_section[i] = NULL;
		}
	}
	shared_region->sr_num_auth_section = 0;
#endif /* __has_feature(ptrauth_calls) */

	/* release the shared region structure... */
	kfree(shared_region, sizeof(*shared_region));

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: destroy(%p) <-\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region)));
	shared_region = NULL;
}

/*
 * Gets the address of the first (in time) mapping in the shared region.
 */
kern_return_t
vm_shared_region_start_address(
	vm_shared_region_t	shared_region,
	mach_vm_offset_t	*start_address)
{
	kern_return_t		kr;
	mach_vm_offset_t	sr_base_address;
	mach_vm_offset_t	sr_first_mapping;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> start_address(%p)\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);

	vm_shared_region_lock();

	/*
	 * Wait if there's another thread establishing a mapping
	 * in this shared region right when we're looking at it.
	 * We want a consistent view of the map...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		assert(shared_region->sr_ref_count > 1);
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
		    THREAD_UNINT);
	}
	assert(!shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);

	sr_base_address = shared_region->sr_base_address;
	sr_first_mapping = shared_region->sr_first_mapping;

	if (sr_first_mapping == (mach_vm_offset_t) -1) {
		/* shared region is empty */
		kr = KERN_INVALID_ADDRESS;
	} else {
		kr = KERN_SUCCESS;
		*start_address = sr_base_address + sr_first_mapping;
	}

	vm_shared_region_unlock();

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: start_address(%p) <- 0x%llx\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region),
		(long long)shared_region->sr_base_address));

	return kr;
}

/*
 * Look up a pre-existing mapping in the shared region, for replacement.
 * Takes an extra object reference if found.
 */
static kern_return_t
find_mapping_to_slide(vm_map_t map, vm_map_address_t addr, vm_map_entry_t entry)
{
	vm_map_entry_t found;

	/* find the shared region's map entry to slide */
	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, addr, &found)) {
		/* no mapping there */
		vm_map_unlock_read(map);
		return KERN_INVALID_ARGUMENT;
	}

	*entry = *found;
	/* extra ref to keep object alive while map is unlocked */
	vm_object_reference(VME_OBJECT(found));
	vm_map_unlock_read(map);
	return KERN_SUCCESS;
}

#if __has_feature(ptrauth_calls)

/*
 * Determine if this task is actually using pointer signing.
 */
static boolean_t
task_sign_pointers(task_t task)
{
	if (task->map &&
	    task->map->pmap &&
	    !task->map->pmap->disable_jop) {
		return TRUE;
	}
	return FALSE;
}

/*
 * If the shared region contains mappings that are authenticated, then
 * remap them into the task private map.
 *
 * Failures are possible in this routine when jetsam kills a process
 * just as dyld is trying to set it up. The vm_map and task shared region
 * info get torn down w/o waiting for this thread to finish up.
 */
__attribute__((noinline))
kern_return_t
vm_shared_region_auth_remap(vm_shared_region_t sr)
{
	memory_object_t		sr_pager = MEMORY_OBJECT_NULL;
	task_t			task = current_task();
	vm_shared_region_slide_info_t si;
	uint_t			i;
	vm_object_t		object;
	vm_map_t		sr_map;
	struct vm_map_entry	tmp_entry_store = {0};
	vm_map_entry_t		tmp_entry = NULL;
	int			vm_flags;
	vm_map_kernel_flags_t	vmk_flags;
	vm_map_offset_t		map_addr;
	kern_return_t		kr = KERN_SUCCESS;
	boolean_t		use_ptr_auth = task_sign_pointers(task);

	/*
	 * Don't do this more than once and avoid any race conditions in finishing it.
	 */
	vm_shared_region_lock();
	while (sr->sr_mapping_in_progress) {
		/* wait for our turn... */
		vm_shared_region_sleep(&sr->sr_mapping_in_progress, THREAD_UNINT);
	}
	assert(!sr->sr_mapping_in_progress);
	assert(sr->sr_ref_count > 1);

	/* Just return if already done. */
	if (task->shared_region_auth_remapped) {
		vm_shared_region_unlock();
		return KERN_SUCCESS;
	}

	/* let others know to wait while we're working in this shared region */
	sr->sr_mapping_in_progress = TRUE;
	vm_shared_region_unlock();

	/*
	 * Remap any sections with pointer authentications into the private map.
	 */
	for (i = 0; i < sr->sr_num_auth_section; ++i) {
		si = sr->sr_auth_section[i];

		assert(si->si_ptrauth);

		/*
		 * We have a mapping that needs to be private.
		 * Look for an existing slid mapping's pager with matching
		 * object, offset, slide info and shared_region_id to reuse.
		 */
		object = si->si_slide_object;
		sr_pager = shared_region_pager_match(object, si->si_start, si,
		    use_ptr_auth ? task->jop_pid : 0);
		if (sr_pager == MEMORY_OBJECT_NULL) {
			kr = KERN_FAILURE;
			goto done;
		}

		/*
		 * verify matching jop_pid for this task and this pager
		 */
		shared_region_pager_match_task_key(sr_pager, task);

		sr_map = vm_shared_region_vm_map(sr);
		tmp_entry = NULL;

		kr = find_mapping_to_slide(sr_map, si->si_slid_address - sr->sr_base_address, &tmp_entry_store);
		if (kr != KERN_SUCCESS) {
			goto done;
		}
		tmp_entry = &tmp_entry_store;

		/*
		 * Check that the object exactly covers the region to slide.
		 */
		if (VME_OFFSET(tmp_entry) != si->si_start ||
		    tmp_entry->vme_end - tmp_entry->vme_start != si->si_end - si->si_start) {
			kr = KERN_FAILURE;
			goto done;
		}

		/*
		 * map the pager over the portion of the mapping that needs sliding
		 */
		vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		vmk_flags.vmkf_overwrite_immutable = TRUE;
		map_addr = si->si_slid_address;
		kr = vm_map_enter_mem_object(task->map,
		    &map_addr,
		    si->si_end - si->si_start,
		    (mach_vm_offset_t) 0,
		    vm_flags,
		    vmk_flags,
		    VM_KERN_MEMORY_NONE,
		    (ipc_port_t)(uintptr_t) sr_pager,
		    0,
		    TRUE,
		    tmp_entry->protection,
		    tmp_entry->max_protection,
		    tmp_entry->inheritance);
		memory_object_deallocate(sr_pager);
		sr_pager = MEMORY_OBJECT_NULL;
		if (kr != KERN_SUCCESS) {
			goto done;
		}
		assertf(map_addr == si->si_slid_address,
		    "map_addr=0x%llx si_slid_address=0x%llx tmp_entry=%p\n",
		    (uint64_t)map_addr,
		    (uint64_t)si->si_slid_address,
		    tmp_entry);

		/* Drop the ref count grabbed by find_mapping_to_slide */
		vm_object_deallocate(VME_OBJECT(tmp_entry));
		tmp_entry = NULL;
	}

done:
	if (tmp_entry != NULL) {
		/* Drop the ref count grabbed by find_mapping_to_slide */
		vm_object_deallocate(VME_OBJECT(tmp_entry));
		tmp_entry = NULL;
	}

	/*
	 * Drop any extra reference to the pager in case we're quitting due to an error above.
	 */
	if (sr_pager != MEMORY_OBJECT_NULL) {
		memory_object_deallocate(sr_pager);
	}

	/*
	 * Mark the region as having its auth sections remapped.
	 */
	vm_shared_region_lock();
	task->shared_region_auth_remapped = TRUE;
	sr->sr_mapping_in_progress = FALSE;
	thread_wakeup((event_t)&sr->sr_mapping_in_progress);
	vm_shared_region_unlock();
	return kr;
}
#endif /* __has_feature(ptrauth_calls) */

void
vm_shared_region_undo_mappings(
	vm_map_t		sr_map,
	mach_vm_offset_t	sr_base_address,
	struct _sr_file_mappings *srf_mappings,
	struct _sr_file_mappings *srf_mappings_current,
	unsigned int		srf_current_mappings_count)
{
	unsigned int		j = 0;
	vm_shared_region_t	shared_region = NULL;
	boolean_t		reset_shared_region_state = FALSE;
	struct _sr_file_mappings *srfmp;
	unsigned int		mappings_count;
	struct shared_file_mapping_slide_np *mappings;

	shared_region = vm_shared_region_get(current_task());
	if (shared_region == NULL) {
		printf("Failed to undo mappings because of NULL shared region.\n");
		return;
	}

	if (sr_map == NULL) {
		ipc_port_t		sr_handle;
		vm_named_entry_t	sr_mem_entry;

		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);

		while (shared_region->sr_mapping_in_progress) {
			/* wait for our turn... */
			vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
			    THREAD_UNINT);
		}
		assert(!shared_region->sr_mapping_in_progress);
		assert(shared_region->sr_ref_count > 1);
		/* let others know we're working in this shared region */
		shared_region->sr_mapping_in_progress = TRUE;

		vm_shared_region_unlock();

		reset_shared_region_state = TRUE;

		/* no need to lock because this data is never modified... */
		sr_handle = shared_region->sr_mem_entry;
		sr_mem_entry = (vm_named_entry_t) ip_get_kobject(sr_handle);
		sr_map = sr_mem_entry->backing.map;
		sr_base_address = shared_region->sr_base_address;
	}
	/*
	 * Undo the mappings we've established so far.
	 */
	for (srfmp = &srf_mappings[0];
	    srfmp <= srf_mappings_current;
	    srfmp++) {
		mappings = srfmp->mappings;
		mappings_count = srfmp->mappings_count;
		if (srfmp == srf_mappings_current) {
			mappings_count = srf_current_mappings_count;
		}

		for (j = 0; j < mappings_count; j++) {
			kern_return_t kr2;

			if (mappings[j].sms_size == 0) {
				/*
				 * We didn't establish this
				 * mapping, so nothing to undo.
				 */
				continue;
			}
			SHARED_REGION_TRACE_INFO(
				("shared_region: mapping[%d]: "
				"address:0x%016llx "
				"size:0x%016llx "
				"offset:0x%016llx "
				"maxprot:0x%x prot:0x%x: "
				"undoing...\n",
				j,
				(long long)mappings[j].sms_address,
				(long long)mappings[j].sms_size,
				(long long)mappings[j].sms_file_offset,
				mappings[j].sms_max_prot,
				mappings[j].sms_init_prot));
			kr2 = mach_vm_deallocate(
				sr_map,
				(mappings[j].sms_address -
				sr_base_address),
				mappings[j].sms_size);
			assert(kr2 == KERN_SUCCESS);
		}
	}

	if (reset_shared_region_state) {
		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);
		assert(shared_region->sr_mapping_in_progress);
		/* we're done working on that shared region */
		shared_region->sr_mapping_in_progress = FALSE;
		thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
		vm_shared_region_unlock();
		reset_shared_region_state = FALSE;
	}

	vm_shared_region_deallocate(shared_region);
}

/*
 * For now we only expect to see at most 2 regions to relocate/authenticate
 * per file. One that's VM_PROT_SLIDE and one VM_PROT_SLIDE | VM_PROT_NOAUTH.
 */
#define VMSR_NUM_SLIDES 2

/*
 * First part of vm_shared_region_map_file(). Split out to
 * avoid kernel stack overflow.
 */
__attribute__((noinline))
static kern_return_t
vm_shared_region_map_file_setup(
	vm_shared_region_t	shared_region,
	int			sr_file_mappings_count,
	struct _sr_file_mappings *sr_file_mappings,
	unsigned int		*mappings_to_slide_cnt,
	struct shared_file_mapping_slide_np **mappings_to_slide,
	mach_vm_offset_t	*slid_mappings,
	memory_object_control_t	*slid_file_controls,
	mach_vm_offset_t	*first_mapping,
	mach_vm_offset_t	*file_first_mappings,
	mach_vm_offset_t	*sfm_min_address,
	mach_vm_offset_t	*sfm_max_address,
	vm_map_t		*sr_map_ptr,
	vm_map_offset_t		*lowest_unnestable_addr_ptr)
{
	kern_return_t		kr = KERN_SUCCESS;
	memory_object_control_t	file_control;
	vm_object_t		file_object;
	ipc_port_t		sr_handle;
	vm_named_entry_t	sr_mem_entry;
	vm_map_t		sr_map;
	mach_vm_offset_t	sr_base_address;
	unsigned int		i;
	mach_port_t		map_port;
	vm_map_offset_t		target_address;
	vm_object_t		object;
	vm_object_size_t	obj_size;
	vm_map_offset_t		lowest_unnestable_addr = 0;
	vm_map_kernel_flags_t	vmk_flags;
	mach_vm_offset_t	sfm_end;
	uint32_t		mappings_count;
	struct shared_file_mapping_slide_np *mappings;
	struct _sr_file_mappings *srfmp;
	unsigned int		current_file_index = 0;

	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);

	/*
	 * Make sure we handle only one mapping at a time in a given
	 * shared region, to avoid race conditions. This should not
	 * happen frequently...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
		    THREAD_UNINT);
	}
	assert(!shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);
	/* let others know we're working in this shared region */
	shared_region->sr_mapping_in_progress = TRUE;

	vm_shared_region_unlock();

	/* no need to lock because this data is never modified... */
	sr_handle = shared_region->sr_mem_entry;
	sr_mem_entry = (vm_named_entry_t) ip_get_kobject(sr_handle);
	sr_map = sr_mem_entry->backing.map;
	sr_base_address = shared_region->sr_base_address;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> map(%p)\n",
		(void *)VM_KERNEL_ADDRPERM(shared_region)));

	mappings_count = 0;
	mappings = NULL;
	srfmp = NULL;

	/* process all the files to be mapped */
	for (srfmp = &sr_file_mappings[0];
	    srfmp < &sr_file_mappings[sr_file_mappings_count];
	    srfmp++) {
		mappings_count = srfmp->mappings_count;
		mappings = srfmp->mappings;
		file_control = srfmp->file_control;

		if (mappings_count == 0) {
			/* no mappings here... */
			continue;
		}

		/*
		 * The code below can only correctly "slide" (perform relocations) for one
		 * value of the slide amount. So if a file has a non-zero slide, it has to
		 * match any previous value. A zero slide value is ok for things that are
		 * just directly mapped.
		 */
		if (shared_region->sr_slide == 0 && srfmp->slide != 0) {
			shared_region->sr_slide = srfmp->slide;
		} else if (shared_region->sr_slide != 0 &&
		    srfmp->slide != 0 &&
		    shared_region->sr_slide != srfmp->slide) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: more than 1 non-zero slide value amount "
				"slide 1:0x%x slide 2:0x%x\n ",
				shared_region->sr_slide, srfmp->slide));
			kr = KERN_INVALID_ARGUMENT;
			break;
		}

#if __arm64__
		if ((shared_region->sr_64bit ||
		    page_shift_user32 == SIXTEENK_PAGE_SHIFT) &&
		    ((srfmp->slide & SIXTEENK_PAGE_MASK) != 0)) {
			printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n",
			    __FUNCTION__, srfmp->slide);
			kr = KERN_INVALID_ARGUMENT;
			break;
		}
#endif /* __arm64__ */

		/* get the VM object associated with the file to be mapped */
		file_object = memory_object_control_to_vm_object(file_control);
		assert(file_object);

		/* establish the mappings for that file */
		for (i = 0; i < mappings_count; i++) {
			SHARED_REGION_TRACE_INFO(
				("shared_region: mapping[%d]: "
				"address:0x%016llx size:0x%016llx offset:0x%016llx "
				"maxprot:0x%x prot:0x%x\n",
				i,
				(long long)mappings[i].sms_address,
				(long long)mappings[i].sms_size,
				(long long)mappings[i].sms_file_offset,
				mappings[i].sms_max_prot,
				mappings[i].sms_init_prot));

			if (mappings[i].sms_address < *sfm_min_address) {
				*sfm_min_address = mappings[i].sms_address;
			}

			if (os_add_overflow(mappings[i].sms_address,
			    mappings[i].sms_size,
			    &sfm_end) ||
			    (vm_map_round_page(sfm_end, VM_MAP_PAGE_MASK(sr_map)) <
			    mappings[i].sms_address)) {
				/* overflow */
				kr = KERN_INVALID_ARGUMENT;
				break;
			}
			if (sfm_end > *sfm_max_address) {
				*sfm_max_address = sfm_end;
			}

			if (mappings[i].sms_init_prot & VM_PROT_ZF) {
				/* zero-filled memory */
				map_port = MACH_PORT_NULL;
			} else {
				/* file-backed memory */
				__IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager);
			}

			/*
			 * Remember which mappings need sliding.
			 */
			if (mappings[i].sms_max_prot & VM_PROT_SLIDE) {
				if (*mappings_to_slide_cnt == VMSR_NUM_SLIDES) {
					SHARED_REGION_TRACE_INFO(
						("shared_region: mapping[%d]: "
						"address:0x%016llx size:0x%016llx "
						"offset:0x%016llx "
						"maxprot:0x%x prot:0x%x "
						"too many mappings to slide...\n",
						i,
						(long long)mappings[i].sms_address,
						(long long)mappings[i].sms_size,
						(long long)mappings[i].sms_file_offset,
						mappings[i].sms_max_prot,
						mappings[i].sms_init_prot));
				} else {
					mappings_to_slide[*mappings_to_slide_cnt] = &mappings[i];
					*mappings_to_slide_cnt += 1;
				}
			}

			/* mapping's address is relative to the shared region base */
			target_address = (vm_map_offset_t)(mappings[i].sms_address - sr_base_address);

			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
			vmk_flags.vmkf_already = TRUE;
			/* no copy-on-read for mapped binaries */
			vmk_flags.vmkf_no_copy_on_read = 1;

			/* establish that mapping, OK if it's "already" there */
			if (map_port == MACH_PORT_NULL) {
				/*
				 * We want to map some anonymous memory in a shared region.
				 * We have to create the VM object now, so that it can be mapped "copy-on-write".
				 */
				obj_size = vm_map_round_page(mappings[i].sms_size, VM_MAP_PAGE_MASK(sr_map));
				object = vm_object_allocate(obj_size);
				if (object == VM_OBJECT_NULL) {
					kr = KERN_RESOURCE_SHORTAGE;
				} else {
					kr = vm_map_enter(
						sr_map,
						&target_address,
						vm_map_round_page(mappings[i].sms_size,
						VM_MAP_PAGE_MASK(sr_map)),
						0,
						VM_FLAGS_FIXED,
						vmk_flags,
						VM_KERN_MEMORY_NONE,
						object,
						0,
						TRUE,
						mappings[i].sms_init_prot & VM_PROT_ALL,
						mappings[i].sms_max_prot & VM_PROT_ALL,
						VM_INHERIT_DEFAULT);
				}
			} else {
				object = VM_OBJECT_NULL; /* no anonymous memory here */
				kr = vm_map_enter_mem_object(
					sr_map,
					&target_address,
					vm_map_round_page(mappings[i].sms_size,
					VM_MAP_PAGE_MASK(sr_map)),
					0,
					VM_FLAGS_FIXED,
					vmk_flags,
					VM_KERN_MEMORY_NONE,
					map_port,
					mappings[i].sms_file_offset,
					TRUE,
					mappings[i].sms_init_prot & VM_PROT_ALL,
					mappings[i].sms_max_prot & VM_PROT_ALL,
					VM_INHERIT_DEFAULT);
			}

			if (kr == KERN_SUCCESS) {
				/*
				 * Record the first (chronologically) successful
				 * mapping in this shared region.
				 * We're protected by "sr_mapping_in_progress" here,
				 * so no need to lock "shared_region".
				 */
				assert(current_file_index < VMSR_NUM_SLIDES);
				if (file_first_mappings[current_file_index] == (mach_vm_offset_t) -1) {
					file_first_mappings[current_file_index] = target_address;
				}

				if (*mappings_to_slide_cnt > 0 &&
				    mappings_to_slide[*mappings_to_slide_cnt - 1] == &mappings[i]) {
					slid_mappings[*mappings_to_slide_cnt - 1] = target_address;
					slid_file_controls[*mappings_to_slide_cnt - 1] = file_control;
				}

				/*
				 * Record the lowest writable address in this
				 * sub map, to log any unexpected unnesting below
				 * that address (see log_unnest_badness()).
				 */
				if ((mappings[i].sms_init_prot & VM_PROT_WRITE) &&
				    sr_map->is_nested_map &&
				    (lowest_unnestable_addr == 0 ||
				    (target_address < lowest_unnestable_addr))) {
					lowest_unnestable_addr = target_address;
				}
			} else {
				if (map_port == MACH_PORT_NULL) {
					/*
					 * Get rid of the VM object we just created
					 * but failed to map.
					 */
					vm_object_deallocate(object);
					object = VM_OBJECT_NULL;
				}
				if (kr == KERN_MEMORY_PRESENT) {
					/*
					 * This exact mapping was already there:
					 * that's fine.
					 */
					SHARED_REGION_TRACE_INFO(
						("shared_region: mapping[%d]: "
						"address:0x%016llx size:0x%016llx "
						"offset:0x%016llx "
						"maxprot:0x%x prot:0x%x "
						"already mapped...\n",
						i,
						(long long)mappings[i].sms_address,
						(long long)mappings[i].sms_size,
						(long long)mappings[i].sms_file_offset,
						mappings[i].sms_max_prot,
						mappings[i].sms_init_prot));
					/*
					 * We didn't establish this mapping ourselves;
					 * let's reset its size, so that we do not
					 * attempt to undo it if an error occurs later.
					 */
					mappings[i].sms_size = 0;
					kr = KERN_SUCCESS;
				} else {
					break;
				}
			}
		}

		if (kr != KERN_SUCCESS) {
			break;
		}

		++current_file_index;
	}

	if (file_first_mappings[0] != (mach_vm_offset_t)-1) {
		*first_mapping = file_first_mappings[0];
	}

	if (kr != KERN_SUCCESS) {
		/* the last mapping we tried (mappings[i]) failed ! */
		assert(i < mappings_count);
		SHARED_REGION_TRACE_ERROR(
			("shared_region: mapping[%d]: "
			"address:0x%016llx size:0x%016llx "
			"offset:0x%016llx "
			"maxprot:0x%x prot:0x%x failed 0x%x\n",
			i,
			(long long)mappings[i].sms_address,
			(long long)mappings[i].sms_size,
			(long long)mappings[i].sms_file_offset,
			mappings[i].sms_max_prot,
			mappings[i].sms_init_prot,
			kr));

		/*
		 * Respect the design of vm_shared_region_undo_mappings
		 * as we are holding the sr_mapping_in_progress == true here.
		 * So don't allow sr_map == NULL otherwise vm_shared_region_undo_mappings
		 * will be blocked at waiting sr_mapping_in_progress to be false.
		 */
		assert(sr_map != NULL);
		/* undo all the previous mappings */
		vm_shared_region_undo_mappings(sr_map, sr_base_address, sr_file_mappings, srfmp, i);
		return kr;
	}

	*lowest_unnestable_addr_ptr = lowest_unnestable_addr;
	*sr_map_ptr = sr_map;
	return KERN_SUCCESS;
}

/* forward declaration */
__attribute__((noinline))
static void
vm_shared_region_map_file_final(
	vm_shared_region_t	shared_region,
	vm_map_t		sr_map,
	mach_vm_offset_t	sfm_min_address,
	mach_vm_offset_t	sfm_max_address,
	mach_vm_offset_t	*file_first_mappings);

/*
 * Establish some mappings of a file in the shared region.
 * This is used by "dyld" via the shared_region_map_np() system call
 * to populate the shared region with the appropriate shared cache.
 *
 * One could also call it several times to incrementally load several
 * libraries, as long as they do not overlap.
 * It will return KERN_SUCCESS if the mappings were successfully established
 * or if they were already established identically by another process.
 */
__attribute__((noinline))
kern_return_t
vm_shared_region_map_file(
	vm_shared_region_t	shared_region,
	int			sr_file_mappings_count,
	struct _sr_file_mappings *sr_file_mappings)
{
	kern_return_t		kr = KERN_SUCCESS;
	unsigned int		i;
	unsigned int		mappings_to_slide_cnt = 0;
	struct shared_file_mapping_slide_np *mappings_to_slide[VMSR_NUM_SLIDES] = {};
	mach_vm_offset_t	slid_mappings[VMSR_NUM_SLIDES];
	memory_object_control_t	slid_file_controls[VMSR_NUM_SLIDES];
	mach_vm_offset_t	first_mapping = (mach_vm_offset_t)-1;
	mach_vm_offset_t	sfm_min_address = (mach_vm_offset_t)-1;
	mach_vm_offset_t	sfm_max_address = 0;
	vm_map_t		sr_map = NULL;
	vm_map_offset_t		lowest_unnestable_addr = 0;
	mach_vm_offset_t	file_first_mappings[VMSR_NUM_SLIDES] = {(mach_vm_offset_t) -1, (mach_vm_offset_t) -1};

	kr = vm_shared_region_map_file_setup(shared_region, sr_file_mappings_count, sr_file_mappings,
	    &mappings_to_slide_cnt, &mappings_to_slide[0], slid_mappings, slid_file_controls,
	    &first_mapping, &file_first_mappings[0],
	    &sfm_min_address, &sfm_max_address, &sr_map, &lowest_unnestable_addr);
	if (kr != KERN_SUCCESS) {
		vm_shared_region_lock();
		goto done;
	}

	/*
	 * The call above installed direct mappings to the shared cache file.
	 * Now we go back and overwrite the mappings that need relocation
	 * with a special shared region pager.
	 */
	for (i = 0; i < mappings_to_slide_cnt; ++i) {
		kr = vm_shared_region_slide(shared_region->sr_slide,
		    mappings_to_slide[i]->sms_file_offset,
		    mappings_to_slide[i]->sms_size,
		    mappings_to_slide[i]->sms_slide_start,
		    mappings_to_slide[i]->sms_slide_size,
		    slid_mappings[i],
		    slid_file_controls[i],
		    mappings_to_slide[i]->sms_max_prot);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: region_slide("
				"slide:0x%x start:0x%016llx "
				"size:0x%016llx) failed 0x%x\n",
				shared_region->sr_slide,
				(long long)mappings_to_slide[i]->sms_slide_start,
				(long long)mappings_to_slide[i]->sms_slide_size,
				kr));
			vm_shared_region_lock();
			goto done;
		}
	}

	assert(kr == KERN_SUCCESS);

	/* adjust the map's "lowest_unnestable_start" */
	lowest_unnestable_addr &= ~(pmap_shared_region_size_min(sr_map->pmap) - 1);
	if (lowest_unnestable_addr != sr_map->lowest_unnestable_start) {
		vm_map_lock(sr_map);
		sr_map->lowest_unnestable_start = lowest_unnestable_addr;
		vm_map_unlock(sr_map);
	}

	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);
	assert(shared_region->sr_mapping_in_progress);

	/* set "sr_first_mapping"; dyld uses it to validate the shared cache */
	if (shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
		shared_region->sr_first_mapping = first_mapping;
	}

	vm_shared_region_map_file_final(shared_region, sr_map, sfm_min_address, sfm_max_address,
	    &file_first_mappings[0]);

done:
	/*
	 * We're done working on that shared region.
	 * Wake up any waiting threads.
	 */
	shared_region->sr_mapping_in_progress = FALSE;
	thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
	vm_shared_region_unlock();

#if __has_feature(ptrauth_calls)
	if (kr == KERN_SUCCESS) {
		/*
		 * Since authenticated mappings were just added to the shared region,
		 * go back and remap them into private mappings for this task.
		 */
		kr = vm_shared_region_auth_remap(shared_region);
	}
#endif /* __has_feature(ptrauth_calls) */

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: map(%p) <- 0x%x \n",
		(void *)VM_KERNEL_ADDRPERM(shared_region), kr));
	return kr;
}
1816 * Final part of vm_shared_region_map_file().
1817 * Kept in separate function to avoid blowing out the stack.
1819 __attribute__((noinline
))
1821 vm_shared_region_map_file_final(
1822 vm_shared_region_t shared_region
,
1824 mach_vm_offset_t sfm_min_address
,
1825 mach_vm_offset_t sfm_max_address
,
1826 __unused mach_vm_offset_t
*file_first_mappings
)
1828 struct _dyld_cache_header sr_cache_header
;
1830 size_t image_array_length
;
1831 struct _dyld_cache_image_text_info
*sr_image_layout
;
1835 * copy in the shared region UUID to the shared region structure.
1836 * we do this indirectly by first copying in the shared cache header
1837 * and then copying the UUID from there because we'll need to look
1838 * at other content from the shared cache header.
1840 if (!shared_region
->sr_uuid_copied
) {
1841 error
= copyin((user_addr_t
)(shared_region
->sr_base_address
+ shared_region
->sr_first_mapping
),
1842 (char *)&sr_cache_header
,
1843 sizeof(sr_cache_header
));
1845 memcpy(&shared_region
->sr_uuid
, &sr_cache_header
.uuid
, sizeof(shared_region
->sr_uuid
));
1846 shared_region
->sr_uuid_copied
= TRUE
;
1848 #if DEVELOPMENT || DEBUG
1849 panic("shared_region: copyin shared_cache_header(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
1850 "offset:0 size:0x%016llx) failed with %d\n",
1851 (long long)shared_region
->sr_base_address
,
1852 (long long)shared_region
->sr_first_mapping
,
1853 (long long)sizeof(sr_cache_header
),
1855 #endif /* DEVELOPMENT || DEBUG */
1856 shared_region
->sr_uuid_copied
= FALSE
;
    /*
     * If the shared cache is associated with the init task (and is therefore the system shared cache),
     * check whether it is a custom built shared cache and copy in the shared cache layout accordingly.
     */
    boolean_t is_init_task = (task_pid(current_task()) == 1);
    if (shared_region->sr_uuid_copied && is_init_task) {
        /* Copy in the shared cache layout if we're running with a locally built shared cache */
        if (sr_cache_header.locallyBuiltCache) {
            KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_START);
            image_array_length = (size_t)(sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info));
            sr_image_layout = kheap_alloc(KHEAP_DATA_BUFFERS, image_array_length, Z_WAITOK);
            error = copyin((user_addr_t)(shared_region->sr_base_address + shared_region->sr_first_mapping +
                sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length);
            if (error == 0) {
                shared_region->sr_images = kalloc((vm_size_t)(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64)));
                for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) {
                    memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid,
                        sizeof(shared_region->sr_images[index].imageUUID));
                    shared_region->sr_images[index].imageLoadAddress = sr_image_layout[index].loadAddress;
                }

                assert(sr_cache_header.imagesTextCount < UINT32_MAX);
                shared_region->sr_images_count = (uint32_t) sr_cache_header.imagesTextCount;
            } else {
#if DEVELOPMENT || DEBUG
                panic("shared_region: copyin shared_cache_layout(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
                    "offset:0x%016llx size:0x%016llx) failed with %d\n",
                    (long long)shared_region->sr_base_address,
                    (long long)shared_region->sr_first_mapping,
                    (long long)sr_cache_header.imagesTextOffset,
                    (long long)image_array_length,
                    error);
#endif /* DEVELOPMENT || DEBUG */
            }

            KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count);
            kheap_free(KHEAP_DATA_BUFFERS, sr_image_layout, image_array_length);
            sr_image_layout = NULL;
        }
        init_task_shared_region = shared_region;
    }
    /*
     * If we succeeded, we know the bounds of the shared region.
     * Trim our pmaps to only cover this range (if applicable to
     * this platform).
     */
    if (VM_MAP_PAGE_SHIFT(current_map()) == VM_MAP_PAGE_SHIFT(sr_map)) {
        pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_max_address - sfm_min_address);
    }
}
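/*
 * Example with made-up numbers: if sr_base_address is 0x180000000 and the
 * first mapping of the dyld_shared_cache file starts at sr_first_mapping = 0,
 * the copyin above reads the dyld cache header from user address 0x180000000,
 * copies its UUID into the shared region and, for a locally built system
 * cache in the init task, follows header.imagesTextOffset to the per-image
 * text records used to populate sr_images[].
 */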
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 *
 * This also tries to trim the pmap for the shared region.
 */
vm_shared_region_t
vm_shared_region_trim_and_get(task_t task)
{
    vm_shared_region_t shared_region;
    ipc_port_t sr_handle;
    vm_named_entry_t sr_mem_entry;
    vm_map_t sr_map;

    /* Get the shared region and the map. */
    shared_region = vm_shared_region_get(task);
    if (shared_region == NULL) {
        return NULL;
    }

    sr_handle = shared_region->sr_mem_entry;
    sr_mem_entry = (vm_named_entry_t) ip_get_kobject(sr_handle);
    sr_map = sr_mem_entry->backing.map;

    /* Trim the pmap if possible. */
    if (VM_MAP_PAGE_SHIFT(task->map) == VM_MAP_PAGE_SHIFT(sr_map)) {
        pmap_trim(task->map->pmap, sr_map->pmap, 0, 0);
    }

    return shared_region;
}
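/*
 * Typical caller pattern (sketch, mirroring the get/deallocate pairing used
 * elsewhere in this file):
 *
 *	vm_shared_region_t sr = vm_shared_region_trim_and_get(task);
 *	if (sr != NULL) {
 *		... use sr ...
 *		vm_shared_region_deallocate(sr);  // consume the extra reference
 *	}
 */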
/*
 * Enter the appropriate shared region into "map" for "task".
 * This involves looking up the shared region (and possibly creating a new
 * one) for the desired environment, then mapping the VM sub map into the
 * task's VM "map", with the appropriate level of pmap-nesting.
 */
kern_return_t
vm_shared_region_enter(
    struct _vm_map *map,
    struct task *task,
    boolean_t is_64bit,
    void *fsroot,
    cpu_type_t cpu,
    cpu_subtype_t cpu_subtype,
    boolean_t reslide)
{
    kern_return_t kr;
    vm_shared_region_t shared_region;
    vm_map_offset_t sr_address, sr_offset, target_address;
    vm_map_size_t sr_size, mapping_size;
    vm_map_offset_t sr_pmap_nesting_start;
    vm_map_size_t sr_pmap_nesting_size;
    ipc_port_t sr_handle;
    vm_prot_t cur_prot, max_prot;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> "
        "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d)\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(fsroot),
        cpu, cpu_subtype, is_64bit));

    /* lookup (create if needed) the shared region for this environment */
    shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit, reslide);
    if (shared_region == NULL) {
        /* this should not happen ! */
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: -> "
            "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d): "
            "lookup failed !\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit, reslide));
        //panic("shared_region_enter: lookup failed\n");
        return KERN_FAILURE;
    }

    /* no need to lock since this data is never modified */
    sr_address = (vm_map_offset_t)shared_region->sr_base_address;
    sr_size = (vm_map_size_t)shared_region->sr_size;
    sr_handle = shared_region->sr_mem_entry;
    sr_pmap_nesting_start = (vm_map_offset_t)shared_region->sr_pmap_nesting_start;
    sr_pmap_nesting_size = (vm_map_size_t)shared_region->sr_pmap_nesting_size;

    cur_prot = VM_PROT_READ;
    if (VM_MAP_POLICY_WRITABLE_SHARED_REGION(map)) {
        /*
         * XXX BINARY COMPATIBILITY
         * java6 apparently needs to modify some code in the
         * dyld shared cache and needs to be allowed to add
         * write access...
         */
        max_prot = VM_PROT_ALL;
    } else {
        max_prot = VM_PROT_READ;
    }

    /*
     * Start mapping the shared region's VM sub map into the task's VM map.
     */
    sr_offset = 0;

    if (sr_pmap_nesting_start > sr_address) {
        /* we need to map a range without pmap-nesting first */
        target_address = sr_address;
        mapping_size = sr_pmap_nesting_start - sr_address;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_NONE,
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
                "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (void *)VM_KERNEL_ADDRPERM(task),
                (void *)VM_KERNEL_ADDRPERM(fsroot),
                cpu, cpu_subtype, is_64bit,
                (long long)target_address,
                (long long)mapping_size,
                (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
            "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit,
            (long long)target_address, (long long)mapping_size,
            (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
        sr_offset += mapping_size;
        sr_size -= mapping_size;
    }
    /*
     * We may need to map several pmap-nested portions, due to platform
     * specific restrictions on pmap nesting.
     * The pmap-nesting is triggered by the "vmkf_nested_pmap" flag...
     */
    for (;
        sr_pmap_nesting_size > 0;
        sr_offset += mapping_size,
        sr_size -= mapping_size,
        sr_pmap_nesting_size -= mapping_size) {
        vm_map_kernel_flags_t vmk_flags;

        target_address = sr_address + sr_offset;
        mapping_size = sr_pmap_nesting_size;
        if (mapping_size > pmap_nesting_size_max(map->pmap)) {
            mapping_size = (vm_map_offset_t) pmap_nesting_size_max(map->pmap);
        }
        vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
        vmk_flags.vmkf_nested_pmap = TRUE;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            vmk_flags,
            VM_MEMORY_SHARED_PMAP,
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
                "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (void *)VM_KERNEL_ADDRPERM(task),
                (void *)VM_KERNEL_ADDRPERM(fsroot),
                cpu, cpu_subtype, is_64bit,
                (long long)target_address,
                (long long)mapping_size,
                (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
            "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit,
            (long long)target_address, (long long)mapping_size,
            (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
    }
    if (sr_size > 0) {
        /* and there's some left to be mapped without pmap-nesting */
        target_address = sr_address + sr_offset;
        mapping_size = sr_size;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_NONE,
            sr_handle,
            sr_offset,
            TRUE,
            cur_prot,
            max_prot,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
                "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (void *)VM_KERNEL_ADDRPERM(task),
                (void *)VM_KERNEL_ADDRPERM(fsroot),
                cpu, cpu_subtype, is_64bit,
                (long long)target_address,
                (long long)mapping_size,
                (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
            "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (void *)VM_KERNEL_ADDRPERM(task),
            (void *)VM_KERNEL_ADDRPERM(fsroot),
            cpu, cpu_subtype, is_64bit,
            (long long)target_address, (long long)mapping_size,
            (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
        sr_offset += mapping_size;
        sr_size -= mapping_size;
    }
    assert(sr_size == 0);

done:
    if (kr == KERN_SUCCESS) {
        /* let the task use that shared region */
        vm_shared_region_set(task, shared_region);
    } else {
        /* drop our reference since we're not using it */
        vm_shared_region_deallocate(shared_region);
        vm_shared_region_set(task, NULL);
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: enter(%p,%p,%p,%d,%d,%d) <- 0x%x\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task),
        (void *)VM_KERNEL_ADDRPERM(fsroot),
        cpu, cpu_subtype, is_64bit,
        kr));
    return kr;
}
#define SANE_SLIDE_INFO_SIZE (2560*1024) /* Can be changed if needed */
struct vm_shared_region_slide_info slide_info;

kern_return_t
vm_shared_region_sliding_valid(uint32_t slide)
{
    kern_return_t kr = KERN_SUCCESS;
    vm_shared_region_t sr = vm_shared_region_get(current_task());

    /* No region yet? we're fine. */
    if (sr == NULL) {
        return kr;
    }

    if (sr->sr_slide != 0 && slide != 0) {
        if (slide == sr->sr_slide) {
            /*
             * Request for sliding when we've
             * already done it with exactly the
             * same slide value before.
             * This isn't wrong technically but
             * we don't want to slide again and
             * so we return this value.
             */
            kr = KERN_INVALID_ARGUMENT;
        } else {
            printf("Mismatched shared region slide\n");
            kr = KERN_FAILURE;
        }
    }
    vm_shared_region_deallocate(sr);
    return kr;
}
/*
 * Actually create (really overwrite) the mapping to part of the shared cache which
 * undergoes relocation. This routine reads in the relocation info from dyld and
 * verifies it. It then creates a (or finds a matching) shared region pager which
 * handles the actual modification of the page contents and installs the mapping
 * using this pager.
 */
static kern_return_t
vm_shared_region_slide_mapping(
    vm_shared_region_t sr,
    user_addr_t slide_info_addr,
    mach_vm_size_t slide_info_size,
    mach_vm_offset_t start,
    mach_vm_size_t size,
    mach_vm_offset_t slid_mapping,
    uint32_t slide,
    memory_object_control_t sr_file_control,
    vm_prot_t prot)
{
    kern_return_t kr;
    vm_object_t object = VM_OBJECT_NULL;
    vm_shared_region_slide_info_t si = NULL;
    vm_map_entry_t tmp_entry = VM_MAP_ENTRY_NULL;
    struct vm_map_entry tmp_entry_store;
    memory_object_t sr_pager = MEMORY_OBJECT_NULL;
    vm_map_t sr_map;
    int vm_flags;
    vm_map_kernel_flags_t vmk_flags;
    vm_map_offset_t map_addr;
    void *slide_info_entry = NULL;
    int error;

    assert(sr->sr_slide_in_progress);

    if (sr_file_control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Copy in and verify the relocation information.
     */
    if (slide_info_size < MIN_SLIDE_INFO_SIZE) {
        printf("Slide_info_size too small: %lx\n", (uintptr_t)slide_info_size);
        return KERN_FAILURE;
    }
    if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
        printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
        return KERN_FAILURE;
    }

    slide_info_entry = kheap_alloc(KHEAP_DATA_BUFFERS, (vm_size_t)slide_info_size, Z_WAITOK);
    if (slide_info_entry == NULL) {
        return KERN_RESOURCE_SHORTAGE;
    }
    error = copyin(slide_info_addr, slide_info_entry, (size_t)slide_info_size);
    if (error) {
        printf("copyin of slide_info failed\n");
        kr = KERN_INVALID_ADDRESS;
        goto done;
    }

    if ((kr = vm_shared_region_slide_sanity_check(slide_info_entry, slide_info_size)) != KERN_SUCCESS) {
        printf("Sanity Check failed for slide_info\n");
        goto done;
    }

    /*
     * Allocate and fill in a vm_shared_region_slide_info.
     * This will either be used by a new pager, or used to find
     * a pre-existing matching pager.
     */
    object = memory_object_control_to_vm_object(sr_file_control);
    if (object == VM_OBJECT_NULL || object->internal) {
        object = VM_OBJECT_NULL;
        kr = KERN_INVALID_ADDRESS;
        goto done;
    }

    si = kalloc(sizeof(*si));
    if (si == NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto done;
    }
    vm_object_lock(object);

    vm_object_reference_locked(object); /* for si->slide_object */
    object->object_is_shared_cache = TRUE;
    vm_object_unlock(object);

    si->si_slide_info_entry = slide_info_entry;
    si->si_slide_info_size = slide_info_size;

    assert(slid_mapping != (mach_vm_offset_t) -1);
    si->si_slid_address = slid_mapping + sr->sr_base_address;
    si->si_slide_object = object;
    si->si_start = start;
    si->si_end = si->si_start + size;
    si->si_slide = slide;
#if __has_feature(ptrauth_calls)
    /*
     * If there is authenticated pointer data in this slid mapping,
     * then just add the information needed to create new pagers for
     * different shared_region_id's later.
     */
    if (sr->sr_cpu_type == CPU_TYPE_ARM64 &&
        sr->sr_cpu_subtype == CPU_SUBTYPE_ARM64E &&
        !(prot & VM_PROT_NOAUTH)) {
        if (sr->sr_num_auth_section == NUM_SR_AUTH_SECTIONS) {
            printf("Too many auth/private sections for shared region!!\n");
            kr = KERN_INVALID_ARGUMENT;
            goto done;
        }
        si->si_ptrauth = TRUE;
        sr->sr_auth_section[sr->sr_num_auth_section++] = si;
        /*
         * Remember the shared region, since that's where we'll
         * stash this info for all auth pagers to share. Each pager
         * will need to take a reference to it.
         */
        si->si_shared_region = sr;
        vm_shared_region_reference(sr);
    } else {
        si->si_shared_region = NULL;
        si->si_ptrauth = FALSE;
    }
#else /* __has_feature(ptrauth_calls) */
    (void)prot; /* silence unused warning */
#endif /* __has_feature(ptrauth_calls) */

    /*
     * find the pre-existing shared region's map entry to slide
     */
    sr_map = vm_shared_region_vm_map(sr);
    kr = find_mapping_to_slide(sr_map, (vm_map_address_t)slid_mapping, &tmp_entry_store);
    if (kr != KERN_SUCCESS) {
        goto done;
    }
    tmp_entry = &tmp_entry_store;

    /*
     * The object must exactly cover the region to slide.
     */
    assert(VME_OFFSET(tmp_entry) == start);
    assert(tmp_entry->vme_end - tmp_entry->vme_start == size);

    /* create a "shared_region" sliding pager */
    sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry), VME_OFFSET(tmp_entry), si, 0);
    if (sr_pager == MEMORY_OBJECT_NULL) {
        kr = KERN_RESOURCE_SHORTAGE;
        goto done;
    }

    /* map that pager over the portion of the mapping that needs sliding */
    vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    vmk_flags.vmkf_overwrite_immutable = TRUE;
    map_addr = tmp_entry->vme_start;
    kr = vm_map_enter_mem_object(sr_map,
        &map_addr,
        (tmp_entry->vme_end - tmp_entry->vme_start),
        (mach_vm_offset_t) 0,
        vm_flags,
        vmk_flags,
        VM_KERN_MEMORY_NONE,
        (ipc_port_t)(uintptr_t) sr_pager,
        0,
        TRUE,
        tmp_entry->protection,
        tmp_entry->max_protection,
        tmp_entry->inheritance);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x\n", kr);
    assertf(map_addr == tmp_entry->vme_start,
        "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n",
        (uint64_t)map_addr,
        (uint64_t) tmp_entry->vme_start,
        tmp_entry);

    /* success! */
    kr = KERN_SUCCESS;

done:
    if (sr_pager != NULL) {
        /*
         * Release the sr_pager reference obtained by shared_region_pager_setup().
         * The mapping, if it succeeded, is now holding a reference on the memory object.
         */
        memory_object_deallocate(sr_pager);
        sr_pager = MEMORY_OBJECT_NULL;
    }
    if (tmp_entry != NULL) {
        /* release extra ref on tmp_entry's VM object */
        vm_object_deallocate(VME_OBJECT(tmp_entry));
        tmp_entry = VM_MAP_ENTRY_NULL;
    }

    if (kr != KERN_SUCCESS) {
        /* cleanup */
        if (si != NULL) {
            if (si->si_slide_object) {
                vm_object_deallocate(si->si_slide_object);
                si->si_slide_object = VM_OBJECT_NULL;
            }
            kfree(si, sizeof(*si));
            si = NULL;
        }
        if (slide_info_entry != NULL) {
            kheap_free(KHEAP_DATA_BUFFERS, slide_info_entry, (vm_size_t)slide_info_size);
            slide_info_entry = NULL;
        }
    }
    return kr;
}
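/*
 * Sketch of the flow above (hypothetical addresses): if dyld asks to slide the
 * portion of the cache mapped at slid_mapping = 0x20000000 within the region,
 * the existing submap entry covering [vme_start, vme_end) is looked up, a
 * shared-region pager is created (or matched) for that backing object, offset
 * and slide info, and the same range is re-entered with VM_FLAGS_OVERWRITE so
 * that subsequent page faults go through the pager, which applies the slide
 * lazily, one page at a time.
 */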
static kern_return_t
vm_shared_region_slide_sanity_check_v2(
    vm_shared_region_slide_info_entry_v2_t s_info,
    mach_vm_size_t slide_info_size)
{
    if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v2)) {
        printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size);
        return KERN_FAILURE;
    }
    if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
        return KERN_FAILURE;
    }

    /* Ensure that the slide info doesn't reference any data outside of its bounds. */

    uint32_t page_starts_count = s_info->page_starts_count;
    uint32_t page_extras_count = s_info->page_extras_count;
    mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
    if (num_trailing_entries < page_starts_count) {
        return KERN_FAILURE;
    }

    /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
    mach_vm_size_t trailing_size = num_trailing_entries << 1;
    if (trailing_size >> 1 != num_trailing_entries) {
        return KERN_FAILURE;
    }

    mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
    if (required_size < sizeof(*s_info)) {
        return KERN_FAILURE;
    }

    if (required_size > slide_info_size) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
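/*
 * Worked example (hypothetical counts): with page_starts_count = 4096 and
 * page_extras_count = 128, num_trailing_entries = 4224, trailing_size =
 * 4224 * sizeof(uint16_t) = 8448 bytes, so required_size =
 * sizeof(struct vm_shared_region_slide_info_entry_v2) + 8448, and the
 * copied-in slide_info_size must be at least that large for the slide info
 * to be accepted.
 */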
static kern_return_t
vm_shared_region_slide_sanity_check_v3(
    vm_shared_region_slide_info_entry_v3_t s_info,
    mach_vm_size_t slide_info_size)
{
    if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v3)) {
        printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size);
        return KERN_FAILURE;
    }
    if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
        printf("vm_shared_region_slide_sanity_check_v3: s_info->page_size != PAGE_SIZE_FOR_SR_SL 0x%llx != 0x%llx\n", (uint64_t)s_info->page_size, (uint64_t)PAGE_SIZE_FOR_SR_SLIDE);
        return KERN_FAILURE;
    }

    uint32_t page_starts_count = s_info->page_starts_count;
    mach_vm_size_t num_trailing_entries = page_starts_count;
    mach_vm_size_t trailing_size = num_trailing_entries << 1;
    mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
    if (required_size < sizeof(*s_info)) {
        printf("vm_shared_region_slide_sanity_check_v3: required_size != sizeof(*s_info) 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)sizeof(*s_info));
        return KERN_FAILURE;
    }

    if (required_size > slide_info_size) {
        printf("vm_shared_region_slide_sanity_check_v3: required_size != slide_info_size 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)slide_info_size);
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v4(
    vm_shared_region_slide_info_entry_v4_t s_info,
    mach_vm_size_t slide_info_size)
{
    if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v4)) {
        printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size);
        return KERN_FAILURE;
    }
    if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
        return KERN_FAILURE;
    }

    /* Ensure that the slide info doesn't reference any data outside of its bounds. */

    uint32_t page_starts_count = s_info->page_starts_count;
    uint32_t page_extras_count = s_info->page_extras_count;
    mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
    if (num_trailing_entries < page_starts_count) {
        return KERN_FAILURE;
    }

    /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
    mach_vm_size_t trailing_size = num_trailing_entries << 1;
    if (trailing_size >> 1 != num_trailing_entries) {
        return KERN_FAILURE;
    }

    mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
    if (required_size < sizeof(*s_info)) {
        return KERN_FAILURE;
    }

    if (required_size > slide_info_size) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check(
    vm_shared_region_slide_info_entry_t s_info,
    mach_vm_size_t s_info_size)
{
    kern_return_t kr;

    switch (s_info->version) {
    case 2:
        kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, s_info_size);
        break;
    case 3:
        kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, s_info_size);
        break;
    case 4:
        kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, s_info_size);
        break;
    default:
        kr = KERN_FAILURE;
        break;
    }
    return kr;
}
static kern_return_t
rebase_chain_32(
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

    const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
    const uint32_t value_mask = ~delta_mask;
    const uint32_t value_add = (uint32_t)(s_info->value_add);
    const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

    uint32_t page_offset = start_offset;
    uint32_t delta = 1;

    while (delta != 0 && page_offset <= last_page_offset) {
        uint8_t *loc;
        uint32_t value;

        loc = page_content + page_offset;
        memcpy(&value, loc, sizeof(value));
        delta = (value & delta_mask) >> delta_shift;
        value &= value_mask;

        if (value != 0) {
            value += value_add;
            value += slide_amount;
        }
        memcpy(loc, &value, sizeof(value));
        page_offset += delta;
    }

    /* If the offset went past the end of the page, then the slide data is invalid. */
    if (page_offset > last_page_offset) {
        return KERN_FAILURE;
    }
    return KERN_SUCCESS;
}
static kern_return_t
rebase_chain_64(
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint64_t);

    const uint64_t delta_mask = s_info->delta_mask;
    const uint64_t value_mask = ~delta_mask;
    const uint64_t value_add = s_info->value_add;
    const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;

    uint32_t page_offset = start_offset;
    uint32_t delta = 1;

    while (delta != 0 && page_offset <= last_page_offset) {
        uint8_t *loc;
        uint64_t value;

        loc = page_content + page_offset;
        memcpy(&value, loc, sizeof(value));
        delta = (uint32_t)((value & delta_mask) >> delta_shift);
        value &= value_mask;

        if (value != 0) {
            value += value_add;
            value += slide_amount;
        }
        memcpy(loc, &value, sizeof(value));
        page_offset += delta;
    }

    if (page_offset + sizeof(uint32_t) == PAGE_SIZE_FOR_SR_SLIDE) {
        /* If a pointer straddling the page boundary needs to be adjusted, then
         * add the slide to the lower half. The encoding guarantees that the upper
         * half on the next page will need no masking.
         *
         * This assumes a little-endian machine and that the region being slid
         * never crosses a 4 GB boundary. */
        uint8_t *loc = page_content + page_offset;
        uint32_t value;

        memcpy(&value, loc, sizeof(value));
        value += slide_amount;
        memcpy(loc, &value, sizeof(value));
    } else if (page_offset > last_page_offset) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
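/*
 * Worked example of the v2 chain encoding (hypothetical values): with
 * delta_mask = 0x00FF000000000000, delta_shift = __builtin_ctzll(delta_mask)
 * - 2 = 48 - 2 = 46, so a delta field of 1 in the high byte yields
 * (1 << 48) >> 46 = 4 bytes to the next pointer in the chain. The masked-off
 * low bits are the unslid target, which gets value_add and slide_amount added
 * before being written back; a delta of 0 terminates the chain for that page.
 */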
static kern_return_t
rebase_chain(
    boolean_t is_64,
    uint32_t pageIndex,
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v2_t s_info)
{
    kern_return_t kr;

    if (is_64) {
        kr = rebase_chain_64(page_content, start_offset, slide_amount, s_info);
    } else {
        kr = rebase_chain_32(page_content, start_offset, slide_amount, s_info);
    }

    if (kr != KERN_SUCCESS) {
        printf("vm_shared_region_slide_page() offset overflow: pageIndex=%u, start_offset=%u, slide_amount=%u\n",
            pageIndex, start_offset, slide_amount);
    }
    return kr;
}
static kern_return_t
vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
    vm_shared_region_slide_info_entry_v2_t s_info = &si->si_slide_info_entry->v2;
    const uint32_t slide_amount = si->si_slide;

    /* The high bits of the delta_mask field are nonzero precisely when the shared
     * cache is 64-bit. */
    const boolean_t is_64 = (s_info->delta_mask >> 32) != 0;

    const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
    const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

    uint8_t *page_content = (uint8_t *)vaddr;
    uint16_t page_entry;

    if (pageIndex >= s_info->page_starts_count) {
        printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
            pageIndex, s_info->page_starts_count);
        return KERN_FAILURE;
    }
    page_entry = page_starts[pageIndex];

    if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
        return KERN_SUCCESS;
    }

    if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) {
        uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE;
        uint16_t info;

        do {
            uint16_t page_start_offset;
            kern_return_t kr;

            if (chain_index >= s_info->page_extras_count) {
                printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
                    chain_index, s_info->page_extras_count);
                return KERN_FAILURE;
            }
            info = page_extras[chain_index];
            page_start_offset = (uint16_t)((info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);

            kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
            if (kr != KERN_SUCCESS) {
                return KERN_FAILURE;
            }

            chain_index++;
        } while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END));
    } else {
        const uint16_t page_start_offset = (uint16_t)(page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);
        kern_return_t kr;

        kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
        if (kr != KERN_SUCCESS) {
            return KERN_FAILURE;
        }
    }

    return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_page_v3(
    vm_shared_region_slide_info_t si,
    vm_offset_t vaddr,
    __unused mach_vm_offset_t uservaddr,
    uint32_t pageIndex,
#if !__has_feature(ptrauth_calls)
    __unused
#endif /* !__has_feature(ptrauth_calls) */
    uint64_t jop_key)
{
    vm_shared_region_slide_info_entry_v3_t s_info = &si->si_slide_info_entry->v3;
    const uint32_t slide_amount = si->si_slide;

    uint8_t *page_content = (uint8_t *)vaddr;
    uint16_t page_entry;

    if (pageIndex >= s_info->page_starts_count) {
        printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
            pageIndex, s_info->page_starts_count);
        return KERN_FAILURE;
    }
    page_entry = s_info->page_starts[pageIndex];

    if (page_entry == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE) {
        return KERN_SUCCESS;
    }

    uint8_t* rebaseLocation = page_content;
    uint64_t delta = page_entry;
    do {
        rebaseLocation += delta;
        uint64_t value;
        memcpy(&value, rebaseLocation, sizeof(value));
        delta = ((value & 0x3FF8000000000000) >> 51) * sizeof(uint64_t);

        // A pointer is one of :
        // {
        //     uint64_t pointerValue : 51;
        //     uint64_t offsetToNextPointer : 11;
        //     uint64_t isBind : 1 = 0;
        //     uint64_t authenticated : 1 = 0;
        // }
        // {
        //     uint32_t offsetFromSharedCacheBase;
        //     uint16_t diversityData;
        //     uint16_t hasAddressDiversity : 1;
        //     uint16_t hasDKey : 1;
        //     uint16_t hasBKey : 1;
        //     uint16_t offsetToNextPointer : 11;
        //     uint16_t isBind : 1;
        //     uint16_t authenticated : 1 = 1;
        // }

        bool isBind = (value & (1ULL << 62)) != 0;
        if (isBind) {
            return KERN_FAILURE;
        }

#if __has_feature(ptrauth_calls)
        uint16_t diversity_data = (uint16_t)(value >> 32);
        bool hasAddressDiversity = (value & (1ULL << 48)) != 0;
        ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
#endif /* __has_feature(ptrauth_calls) */
        bool isAuthenticated = (value & (1ULL << 63)) != 0;

        if (isAuthenticated) {
            // The new value for a rebase is the low 32-bits of the threaded value plus the slide.
            value = (value & 0xFFFFFFFF) + slide_amount;
            // Add in the offset from the mach_header
            const uint64_t value_add = s_info->value_add;
            value += value_add;

#if __has_feature(ptrauth_calls)
            uint64_t discriminator = diversity_data;
            if (hasAddressDiversity) {
                // First calculate a new discriminator using the address of where we are trying to store the value
                uintptr_t pageOffset = rebaseLocation - page_content;
                discriminator = __builtin_ptrauth_blend_discriminator((void*)(((uintptr_t)uservaddr) + pageOffset), discriminator);
            }

            if (jop_key != 0 && si->si_ptrauth && !arm_user_jop_disabled()) {
                /*
                 * these pointers are used in user mode. disable the kernel key diversification
                 * so we can sign them for use in user mode.
                 */
                value = (uintptr_t)pmap_sign_user_ptr((void *)value, key, discriminator, jop_key);
            }
#endif /* __has_feature(ptrauth_calls) */
        } else {
            // The new value for a rebase is the low 51-bits of the threaded value plus the slide.
            // Regular pointer which needs to fit in 51-bits of value.
            // C++ RTTI uses the top bit, so we'll allow the whole top-byte
            // and the bottom 43-bits to be fit in to 51-bits.
            uint64_t top8Bits = value & 0x0007F80000000000ULL;
            uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;
            uint64_t targetValue = (top8Bits << 13) | bottom43Bits;
            value = targetValue + slide_amount;
        }

        memcpy(rebaseLocation, &value, sizeof(value));
    } while (delta != 0);

    return KERN_SUCCESS;
}
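/*
 * Example of the plain (non-authenticated) v3 rebase above, with made-up
 * numbers: for an on-disk value of 0x0007000000001000, top8Bits =
 * 0x0007000000000000 and bottom43Bits = 0x1000, so targetValue =
 * (top8Bits << 13) | 0x1000 = 0xE000000000001000, and the stored pointer
 * becomes targetValue + slide_amount. The 11-bit offsetToNextPointer field
 * (bits 51..61), multiplied by 8, gives the byte distance to the next pointer;
 * a value of 0 ends the page's chain.
 */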
static kern_return_t
rebase_chainv4(
    uint8_t *page_content,
    uint16_t start_offset,
    uint32_t slide_amount,
    vm_shared_region_slide_info_entry_v4_t s_info)
{
    const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

    const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
    const uint32_t value_mask = ~delta_mask;
    const uint32_t value_add = (uint32_t)(s_info->value_add);
    const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

    uint32_t page_offset = start_offset;
    uint32_t delta = 1;

    while (delta != 0 && page_offset <= last_page_offset) {
        uint8_t *loc;
        uint32_t value;

        loc = page_content + page_offset;
        memcpy(&value, loc, sizeof(value));
        delta = (value & delta_mask) >> delta_shift;
        value &= value_mask;

        if ((value & 0xFFFF8000) == 0) {
            // small positive non-pointer, use as-is
        } else if ((value & 0x3FFF8000) == 0x3FFF8000) {
            // small negative non-pointer
            value |= 0xC0000000;
        } else {
            // pointer that needs rebasing
            value += value_add;
            value += slide_amount;
        }
        memcpy(loc, &value, sizeof(value));
        page_offset += delta;
    }

    /* If the offset went past the end of the page, then the slide data is invalid. */
    if (page_offset > last_page_offset) {
        return KERN_FAILURE;
    }
    return KERN_SUCCESS;
}
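/*
 * Illustrative values for the v4 (32-bit cache) encoding above: a masked
 * value of 0x00004000 has the high bits clear, so it is a small positive
 * non-pointer and is left alone; 0x3FFFF000 matches the 0x3FFF8000 pattern,
 * so it is sign-extended to 0xFFFFF000 by OR-ing in 0xC0000000; anything else
 * is treated as a pointer and gets value_add plus slide_amount added.
 */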
static kern_return_t
vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
    vm_shared_region_slide_info_entry_v4_t s_info = &si->si_slide_info_entry->v4;
    const uint32_t slide_amount = si->si_slide;

    const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
    const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

    uint8_t *page_content = (uint8_t *)vaddr;
    uint16_t page_entry;

    if (pageIndex >= s_info->page_starts_count) {
        printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
            pageIndex, s_info->page_starts_count);
        return KERN_FAILURE;
    }
    page_entry = page_starts[pageIndex];

    if (page_entry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE) {
        return KERN_SUCCESS;
    }

    if (page_entry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) {
        uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE4_PAGE_INDEX;
        uint16_t info;

        do {
            uint16_t page_start_offset;
            kern_return_t kr;

            if (chain_index >= s_info->page_extras_count) {
                printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
                    chain_index, s_info->page_extras_count);
                return KERN_FAILURE;
            }
            info = page_extras[chain_index];
            page_start_offset = (uint16_t)((info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);

            kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
            if (kr != KERN_SUCCESS) {
                return KERN_FAILURE;
            }

            chain_index++;
        } while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END));
    } else {
        const uint16_t page_start_offset = (uint16_t)(page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT);
        kern_return_t kr;

        kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
        if (kr != KERN_SUCCESS) {
            return KERN_FAILURE;
        }
    }

    return KERN_SUCCESS;
}
kern_return_t
vm_shared_region_slide_page(
    vm_shared_region_slide_info_t si,
    vm_offset_t vaddr,
    mach_vm_offset_t uservaddr,
    uint32_t pageIndex,
    uint64_t jop_key)
{
    switch (si->si_slide_info_entry->version) {
    case 2:
        return vm_shared_region_slide_page_v2(si, vaddr, pageIndex);
    case 3:
        return vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex, jop_key);
    case 4:
        return vm_shared_region_slide_page_v4(si, vaddr, pageIndex);
    default:
        return KERN_FAILURE;
    }
}
/******************************************************************************/
/* Comm page support                                                          */
/******************************************************************************/

ipc_port_t commpage32_handle = IPC_PORT_NULL;
ipc_port_t commpage64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage32_entry = NULL;
vm_named_entry_t commpage64_entry = NULL;
vm_map_t commpage32_map = VM_MAP_NULL;
vm_map_t commpage64_map = VM_MAP_NULL;

ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage_text32_entry = NULL;
vm_named_entry_t commpage_text64_entry = NULL;
vm_map_t commpage_text32_map = VM_MAP_NULL;
vm_map_t commpage_text64_map = VM_MAP_NULL;

user32_addr_t commpage_text32_location = 0;
user64_addr_t commpage_text64_location = 0;
#if defined(__i386__) || defined(__x86_64__)
/*
 * Create a memory entry, VM submap and pmap for one commpage.
 */
static void
_vm_commpage_init(
    ipc_port_t *handlep,
    vm_map_size_t size)
{
    kern_return_t kr;
    vm_named_entry_t mem_entry;
    vm_map_t new_map;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> _init(0x%llx)\n",
        (long long)size));

    kr = mach_memory_entry_allocate(&mem_entry,
        handlep);
    if (kr != KERN_SUCCESS) {
        panic("_vm_commpage_init: could not allocate mem_entry");
    }
    new_map = vm_map_create(pmap_create_options(NULL, 0, 0), 0, size, PMAP_CREATE_64BIT);
    if (new_map == VM_MAP_NULL) {
        panic("_vm_commpage_init: could not allocate VM map");
    }
    mem_entry->backing.map = new_map;
    mem_entry->internal = TRUE;
    mem_entry->is_sub_map = TRUE;
    mem_entry->offset = 0;
    mem_entry->protection = VM_PROT_ALL;
    mem_entry->size = size;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: _init(0x%llx) <- %p\n",
        (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep)));
}
#endif /* __i386__ || __x86_64__ */
/*
 * Initialize the comm text pages at boot time
 */
void
vm_commpage_text_init(void)
{
    SHARED_REGION_TRACE_DEBUG(
        ("commpage text: ->init()\n"));
#if defined(__i386__) || defined(__x86_64__)
    /* create the 32 bit comm text page */
    unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restricting to 32bMAX-2PAGE */
    _vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
    commpage_text32_entry = (vm_named_entry_t) ip_get_kobject(commpage_text32_handle);
    commpage_text32_map = commpage_text32_entry->backing.map;
    commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
    /* XXX if (cpu_is_64bit_capable()) ? */
    /* create the 64-bit comm page */
    offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding up to 2Mb range */
    _vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
    commpage_text64_entry = (vm_named_entry_t) ip_get_kobject(commpage_text64_handle);
    commpage_text64_map = commpage_text64_entry->backing.map;
    commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);
#endif /* __i386__ || __x86_64__ */

    commpage_text_populate();

    /* populate the routines in here */
    SHARED_REGION_TRACE_DEBUG(
        ("commpage text: init() <-\n"));
}
/*
 * Initialize the comm pages at boot time.
 */
void
vm_commpage_init(void)
{
    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> init()\n"));

#if defined(__i386__) || defined(__x86_64__)
    /* create the 32-bit comm page */
    _vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
    commpage32_entry = (vm_named_entry_t) ip_get_kobject(commpage32_handle);
    commpage32_map = commpage32_entry->backing.map;

    /* XXX if (cpu_is_64bit_capable()) ? */
    /* create the 64-bit comm page */
    _vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
    commpage64_entry = (vm_named_entry_t) ip_get_kobject(commpage64_handle);
    commpage64_map = commpage64_entry->backing.map;
#endif /* __i386__ || __x86_64__ */

    /* populate them according to this specific platform */
    commpage_populate();
    __commpage_setup = 1;
#if !CONFIG_EMBEDDED
    if (__system_power_source == 0) {
        post_sys_powersource_internal(0, 1);
    }
#endif

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: init() <-\n"));
}
/*
 * Enter the appropriate comm page into the task's address space.
 * This is called at exec() time via vm_map_exec().
 */
kern_return_t
vm_commpage_enter(
    vm_map_t map,
    task_t task,
    boolean_t is64bit)
{
#if defined(__arm__)
#pragma unused(is64bit)
    (void)task;
    (void)map;
    return KERN_SUCCESS;
#elif defined(__arm64__)
#pragma unused(is64bit)
    (void)task;
    pmap_insert_sharedpage(vm_map_pmap(map));
    return KERN_SUCCESS;
#else
    ipc_port_t commpage_handle, commpage_text_handle;
    vm_map_offset_t commpage_address, objc_address, commpage_text_address;
    vm_map_size_t commpage_size, objc_size, commpage_text_size;
    int vm_flags;
    vm_map_kernel_flags_t vmk_flags;
    kern_return_t kr;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> enter(%p,%p)\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task)));

    commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
    /* the comm page is likely to be beyond the actual end of the VM map */
    vm_flags = VM_FLAGS_FIXED;
    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    vmk_flags.vmkf_beyond_max = TRUE;

    /* select the appropriate comm page for this task */
    assert(!(is64bit ^ vm_map_is_64bit(map)));
    if (is64bit) {
        commpage_handle = commpage64_handle;
        commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
        commpage_size = _COMM_PAGE64_AREA_LENGTH;
        objc_size = _COMM_PAGE64_OBJC_SIZE;
        objc_address = _COMM_PAGE64_OBJC_BASE;
        commpage_text_handle = commpage_text64_handle;
        commpage_text_address = (vm_map_offset_t) commpage_text64_location;
    } else {
        commpage_handle = commpage32_handle;
        commpage_address =
            (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
        commpage_size = _COMM_PAGE32_AREA_LENGTH;
        objc_size = _COMM_PAGE32_OBJC_SIZE;
        objc_address = _COMM_PAGE32_OBJC_BASE;
        commpage_text_handle = commpage_text32_handle;
        commpage_text_address = (vm_map_offset_t) commpage_text32_location;
    }

    vm_tag_t tag = VM_KERN_MEMORY_NONE;
    if ((commpage_address & (pmap_commpage_size_min(map->pmap) - 1)) == 0 &&
        (commpage_size & (pmap_commpage_size_min(map->pmap) - 1)) == 0) {
        /* the commpage is properly aligned or sized for pmap-nesting */
        tag = VM_MEMORY_SHARED_PMAP;
        vmk_flags.vmkf_nested_pmap = TRUE;
    }
    /* map the comm page in the task's address space */
    assert(commpage_handle != IPC_PORT_NULL);
    kr = vm_map_enter_mem_object(
        map,
        &commpage_address,
        commpage_size,
        0,
        vm_flags,
        vmk_flags,
        tag,
        commpage_handle,
        0,
        FALSE,
        VM_PROT_READ,
        VM_PROT_READ,
        VM_INHERIT_SHARE);
    if (kr != KERN_SUCCESS) {
        SHARED_REGION_TRACE_ERROR(
            ("commpage: enter(%p,0x%llx,0x%llx) "
            "commpage %p mapping failed 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (long long)commpage_address,
            (long long)commpage_size,
            (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr));
    }

    /* map the comm text page in the task's address space */
    assert(commpage_text_handle != IPC_PORT_NULL);
    kr = vm_map_enter_mem_object(
        map,
        &commpage_text_address,
        commpage_text_size,
        0,
        vm_flags,
        vmk_flags,
        tag,
        commpage_text_handle,
        0,
        FALSE,
        VM_PROT_READ | VM_PROT_EXECUTE,
        VM_PROT_READ | VM_PROT_EXECUTE,
        VM_INHERIT_SHARE);
    if (kr != KERN_SUCCESS) {
        SHARED_REGION_TRACE_ERROR(
            ("commpage text: enter(%p,0x%llx,0x%llx) "
            "commpage text %p mapping failed 0x%x\n",
            (void *)VM_KERNEL_ADDRPERM(map),
            (long long)commpage_text_address,
            (long long)commpage_text_size,
            (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr));
    }

    /*
     * Since we're here, we also pre-allocate some virtual space for the
     * Objective-C run-time, if needed...
     */
    if (objc_size != 0) {
        kr = vm_map_enter_mem_object(
            map,
            &objc_address,
            objc_size,
            0,
            VM_FLAGS_FIXED,
            vmk_flags,
            tag,
            IPC_PORT_NULL,
            0,
            FALSE,
            VM_PROT_ALL,
            VM_PROT_ALL,
            VM_INHERIT_DEFAULT);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("commpage: enter(%p,0x%llx,0x%llx) "
                "objc mapping failed 0x%x\n",
                (void *)VM_KERNEL_ADDRPERM(map),
                (long long)objc_address,
                (long long)objc_size, kr));
        }
    }

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: enter(%p,%p) <- 0x%x\n",
        (void *)VM_KERNEL_ADDRPERM(map),
        (void *)VM_KERNEL_ADDRPERM(task), kr));
    return kr;
#endif
}
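/*
 * For example (illustrative numbers only): if pmap_commpage_size_min()
 * reports 16KB and both commpage_address and commpage_size are multiples of
 * 16KB, the mapping above is tagged VM_MEMORY_SHARED_PMAP and entered with
 * vmkf_nested_pmap, so every task shares the same commpage translation
 * entries instead of faulting them in per process.
 */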
kern_return_t
vm_shared_region_slide(
    uint32_t slide,
    mach_vm_offset_t entry_start_address,
    mach_vm_size_t entry_size,
    mach_vm_offset_t slide_start,
    mach_vm_size_t slide_size,
    mach_vm_offset_t slid_mapping,
    memory_object_control_t sr_file_control,
    vm_prot_t prot)
{
    vm_shared_region_t sr;
    kern_return_t error;

    SHARED_REGION_TRACE_DEBUG(
        ("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
        slide, entry_start_address, entry_size, slide_start, slide_size));

    sr = vm_shared_region_get(current_task());
    if (sr == NULL) {
        printf("%s: no shared region?\n", __FUNCTION__);
        SHARED_REGION_TRACE_DEBUG(
            ("vm_shared_region_slide: <- %d (no shared region)\n",
            KERN_FAILURE));
        return KERN_FAILURE;
    }

    /*
     * Protect from concurrent access.
     */
    vm_shared_region_lock();
    while (sr->sr_slide_in_progress) {
        vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
    }

    sr->sr_slide_in_progress = TRUE;
    vm_shared_region_unlock();

    error = vm_shared_region_slide_mapping(sr,
        (user_addr_t)slide_start,
        slide_size,
        entry_start_address,
        entry_size,
        slid_mapping,
        slide,
        sr_file_control,
        prot);
    if (error) {
        printf("slide_info initialization failed with kr=%d\n", error);
    }

    vm_shared_region_lock();

    assert(sr->sr_slide_in_progress);
    sr->sr_slide_in_progress = FALSE;
    thread_wakeup(&sr->sr_slide_in_progress);

#ifndef CONFIG_EMBEDDED
    if (error == KERN_SUCCESS) {
        shared_region_completed_slide = TRUE;
    }
#endif
    vm_shared_region_unlock();

    vm_shared_region_deallocate(sr);

    SHARED_REGION_TRACE_DEBUG(
        ("vm_shared_region_slide: <- %d\n",
        error));

    return error;
}
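/*
 * Context (sketch): this path is reached when dyld passes slide info for a
 * portion of the shared cache via shared_region_map_and_slide_np();
 * sr_slide_in_progress serializes concurrent sliders, and the actual
 * per-page relocation happens later, at fault time, through
 * vm_shared_region_slide_page().
 */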
/*
 * Used during Authenticated Root Volume macOS boot.
 * Launchd re-execs itself and wants the new launchd to use
 * the shared cache from the new root volume. This call
 * makes all the existing shared caches stale to allow
 * that to happen.
 */
void
vm_shared_region_pivot(void)
{
    vm_shared_region_t shared_region = NULL;

    vm_shared_region_lock();

    queue_iterate(&vm_shared_region_queue, shared_region, vm_shared_region_t, sr_q) {
        assert(shared_region->sr_ref_count > 0);
        shared_region->sr_stale = TRUE;
        if (shared_region->sr_timer_call) {
            /*
             * We have a shared region ready to be destroyed
             * and just waiting for a delayed timer to fire.
             * Marking it stale cements its ineligibility to
             * be used ever again. So let's shorten the timer
             * aggressively down to 10 milliseconds and get rid of it.
             * This is a single quantum and we don't need to go
             * shorter than this duration. We want it to be short
             * enough, however, because we could have an unmount
             * of the volume hosting this shared region just behind
             * us.
             */
            uint64_t deadline;
            assert(shared_region->sr_ref_count == 1);

            /*
             * Free the old timer call. Returns with a reference held.
             * If the old timer has fired and is waiting for the vm_shared_region_lock
             * lock, we will just return with an additional ref_count i.e. 2.
             * The old timer will then fire and just drop the ref count down to 1
             * with no other modifications.
             */
            vm_shared_region_reference_locked(shared_region);

            /* set up the timer. Keep the reference from above for this timer. */
            shared_region->sr_timer_call = thread_call_allocate(
                (thread_call_func_t) vm_shared_region_timeout,
                (thread_call_param_t) shared_region);

            /* schedule the timer */
            clock_interval_to_deadline(10, /* 10 milliseconds */
                NSEC_PER_MSEC,
                &deadline);
            thread_call_enter_delayed(shared_region->sr_timer_call,
                deadline);

            SHARED_REGION_TRACE_DEBUG(
                ("shared_region: pivot(%p): armed timer\n",
                (void *)VM_KERNEL_ADDRPERM(shared_region)));
        }
    }

    vm_shared_region_unlock();
}
/*
 * Routine to mark any non-standard slide shared cache region as stale.
 * This causes the next "reslide" spawn to create a new shared region.
 */
void
vm_shared_region_reslide_stale(void)
{
#if __has_feature(ptrauth_calls)
    vm_shared_region_t shared_region = NULL;

    vm_shared_region_lock();

    queue_iterate(&vm_shared_region_queue, shared_region, vm_shared_region_t, sr_q) {
        assert(shared_region->sr_ref_count > 0);
        if (!shared_region->sr_stale && shared_region->sr_reslide) {
            shared_region->sr_stale = TRUE;
            vm_shared_region_reslide_count++;
        }
    }

    vm_shared_region_unlock();
#endif /* __has_feature(ptrauth_calls) */
}
/*
 * Report whether the task is using a reslide shared cache region.
 */
bool
vm_shared_region_is_reslide(__unused struct task *task)
{
    bool is_reslide = FALSE;
#if !XNU_TARGET_OS_OSX && __has_feature(ptrauth_calls)
    vm_shared_region_t sr = vm_shared_region_get(task);

    if (sr != NULL) {
        is_reslide = sr->sr_reslide;
        vm_shared_region_deallocate(sr);
    }
#endif /* !XNU_TARGET_OS_OSX && __has_feature(ptrauth_calls) */
    return is_reslide;
}
/*
 * This is called from powermanagement code to let kernel know the current source of power.
 * 0 if it is external source (connected to power)
 * 1 if it is internal power source, i.e. battery
 */
void
#if !CONFIG_EMBEDDED
post_sys_powersource(int i)
#else
post_sys_powersource(__unused int i)
#endif
{
#if !CONFIG_EMBEDDED
    post_sys_powersource_internal(i, 0);
#endif
}

#if !CONFIG_EMBEDDED
static void
post_sys_powersource_internal(int i, int internal)
{
    if (internal == 0) {
        __system_power_source = i;
    }
}
#endif

void *
vm_shared_region_root_dir(
    struct vm_shared_region *sr)
{
    void *vnode;

    vm_shared_region_lock();
    vnode = sr->sr_root_dir;
    vm_shared_region_unlock();
= sr
->sr_root_dir
;
3482 vm_shared_region_unlock();