/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Shared region (... and comm page)
 *
 * This file handles the VM shared region and comm page.
 *
 *
 * SHARED REGION:
 *
 * A shared region is a submap that contains the most common system shared
 * libraries for a given environment.
 * An environment is defined by (cpu-type, 64-bitness, root directory).
 *
 * The point of a shared region is to reduce the setup overhead when exec'ing
 * a new process.
 * A shared region uses a shared VM submap that gets mapped automatically
 * at exec() time (see vm_map_exec()).  The first process of a given
 * environment sets up the shared region and all further processes in that
 * environment can re-use that shared region without having to re-create
 * the same mappings in their VM map.  All they need is contained in the shared
 * region.
 * It can also share a pmap (mostly for read-only parts but also for the
 * initial version of some writable parts), which gets "nested" into the
 * process's pmap.  This reduces the number of soft faults: once one process
 * brings in a page in the shared region, all the other processes can access
 * it without having to enter it in their own pmap.
 *
 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
 * to map the appropriate shared region in the process's address space.
 * We look up the appropriate shared region for the process's environment.
 * If we can't find one, we create a new (empty) one and add it to the list.
 * Otherwise, we just take an extra reference on the shared region we found.
 *
 * The "dyld" runtime (mapped into the process's address space at exec() time)
 * will then use the shared_region_check_np() and shared_region_map_np()
 * system calls to validate and/or populate the shared region with the
 * appropriate dyld_shared_cache file.
 *
 * The shared region is inherited on fork() and the child simply takes an
 * extra reference on its parent's shared region.
 *
 * When the task terminates, we release a reference on its shared region.
 * When the last reference is released, we destroy the shared region.
 *
 * After a chroot(), the calling process keeps using its original shared region,
 * since that's what was mapped when it was started.  But its children
 * will use a different shared region, because they need to use the shared
 * cache that's relative to the new root directory.
 *
 *
 * COMM PAGE:
 *
 * A "comm page" is an area of memory that is populated by the kernel with
 * the appropriate platform-specific version of some commonly used code.
 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
 * for the native cpu-type.  No need to overly optimize translated code
 * for hardware that is not really there!
 *
 * The comm pages are created and populated at boot time.
 *
 * The appropriate comm page is mapped into a process's address space
 * at exec() time, in vm_map_exec().
 * It is then inherited on fork().
 *
 * The comm page is shared between the kernel and all applications of
 * a given platform.  Only the kernel can modify it.
 *
 * Applications just branch to fixed addresses in the comm page and find
 * the right version of the code for the platform.  There is also some
 * data provided and updated by the kernel for processes to retrieve easily
 * without having to do a system call.
 */
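/*
 * Illustrative sketch (not part of the kernel sources): how a dyld-like
 * user-space runtime might drive the two system calls described above.
 * The exact prototypes, error handling and the "shared_cache_path" /
 * "mappings" variables are assumptions for illustration only.
 *
 *	uint64_t cache_base = 0;
 *
 *	if (shared_region_check_np(&cache_base) == 0) {
 *		// a shared cache is already mapped at "cache_base":
 *		// validate it and start using it
 *	} else {
 *		// no cache mapped yet: open the dyld_shared_cache file,
 *		// build an array of shared_file_mapping_np entries and
 *		// ask the kernel to establish (and possibly slide) them
 *		// via shared_region_map_np() or its map-and-slide variant
 *		int fd = open(shared_cache_path, O_RDONLY);
 *		shared_region_map_np(fd, mapping_count, mappings);
 *	}
 */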
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>

#include <vm/vm_protos.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data_internal.h>
#endif

/*
 * the following codes are used in the subclass
 * of the DBG_MACH_SHAREDREGION class
 */
#define PROCESS_SHARED_CACHE_LAYOUT 0x00
126 /* "dyld" uses this to figure out what the kernel supports */
127 int shared_region_version
= 3;
129 /* trace level, output is sent to the system log file */
130 int shared_region_trace_level
= SHARED_REGION_TRACE_ERROR_LVL
;
132 /* should local (non-chroot) shared regions persist when no task uses them ? */
133 int shared_region_persistence
= 0; /* no by default */
135 /* delay before reclaiming an unused shared region */
136 int shared_region_destroy_delay
= 120; /* in seconds */
138 struct vm_shared_region
*init_task_shared_region
= NULL
;
140 #ifndef CONFIG_EMBEDDED
142 * Only one cache gets to slide on Desktop, since we can't
143 * tear down slide info properly today and the desktop actually
144 * produces lots of shared caches.
146 boolean_t shared_region_completed_slide
= FALSE
;
/* this lock protects all the shared region data structures */
lck_grp_t *vm_shared_region_lck_grp;
lck_mtx_t vm_shared_region_lock;

#define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
#define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
#define vm_shared_region_sleep(event, interruptible)	\
	lck_mtx_sleep(&vm_shared_region_lock,		\
		      LCK_SLEEP_DEFAULT,		\
		      (event_t) (event),		\
		      (interruptible))

/* the list of currently available shared regions (one per environment) */
queue_head_t vm_shared_region_queue;
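/*
 * Usage sketch (illustration only): the macros above are typically combined
 * into a wait/wakeup protocol around a shared region's
 * "sr_mapping_in_progress" flag, as several routines below do:
 *
 *	vm_shared_region_lock();
 *	while (shared_region->sr_mapping_in_progress) {
 *		// wait for the other thread to finish its mapping work
 *		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
 *				       THREAD_UNINT);
 *	}
 *	shared_region->sr_mapping_in_progress = TRUE;	// take our turn
 *	vm_shared_region_unlock();
 *	// ... do the work ...
 *	vm_shared_region_lock();
 *	shared_region->sr_mapping_in_progress = FALSE;
 *	thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
 *	vm_shared_region_unlock();
 */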
static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
static vm_shared_region_t vm_shared_region_create(
	void			*root_dir,
	cpu_type_t		cputype,
	cpu_subtype_t		cpu_subtype,
	boolean_t		is_64bit);
static void vm_shared_region_destroy(vm_shared_region_t shared_region);

static void vm_shared_region_timeout(thread_call_param_t param0,
				     thread_call_param_t param1);
kern_return_t vm_shared_region_slide_mapping(
	vm_shared_region_t	sr,
	mach_vm_size_t		slide_info_size,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	mach_vm_offset_t	slid_mapping,
	uint32_t		slide,
	memory_object_control_t); /* forward */

static int __commpage_setup = 0;
#if defined(__i386__) || defined(__x86_64__)
static int __system_power_source = 1;	/* init to external power source */
static void post_sys_powersource_internal(int i, int internal);
#endif /* __i386__ || __x86_64__ */
/*
 * Initialize the module...
 */
void
vm_shared_region_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> init\n"));

	vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region",
						      LCK_GRP_ATTR_NULL);
	lck_mtx_init(&vm_shared_region_lock,
		     vm_shared_region_lck_grp,
		     LCK_ATTR_NULL);

	queue_init(&vm_shared_region_queue);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: <- init\n"));
}
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 */
vm_shared_region_t
vm_shared_region_get(
	task_t		task)
{
	vm_shared_region_t	shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> get(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(task)));

	task_lock(task);
	vm_shared_region_lock();
	shared_region = task->shared_region;
	if (shared_region) {
		assert(shared_region->sr_ref_count > 0);
		vm_shared_region_reference_locked(shared_region);
	}
	vm_shared_region_unlock();
	task_unlock(task);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: get(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	return shared_region;
}
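/*
 * Usage sketch (illustration only): every successful vm_shared_region_get()
 * must eventually be balanced by a vm_shared_region_deallocate(), e.g.:
 *
 *	vm_shared_region_t sr;
 *
 *	sr = vm_shared_region_get(current_task());
 *	if (sr != NULL) {
 *		mach_vm_offset_t base = vm_shared_region_base_address(sr);
 *		// ... use "sr" and "base" ...
 *		vm_shared_region_deallocate(sr);	// drop the extra reference
 *	}
 */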
/*
 * Get the base address of the shared region.
 * That's the address at which it needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_offset_t
vm_shared_region_base_address(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> base_address(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: base_address(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_base_address));
	return shared_region->sr_base_address;
}
/*
 * Get the size of the shared region.
 * That's the size that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_size_t
vm_shared_region_size(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> size(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: size(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_size));
	return shared_region->sr_size;
}
/*
 * Get the memory entry of the shared region.
 * That's the "memory object" that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
ipc_port_t
vm_shared_region_mem_entry(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> mem_entry(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: mem_entry(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry)));
	return shared_region->sr_mem_entry;
}
vm_map_t
vm_shared_region_vm_map(
	vm_shared_region_t	shared_region)
{
	ipc_port_t		sr_handle;
	vm_named_entry_t	sr_mem_entry;
	vm_map_t		sr_map;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> vm_map(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);

	sr_handle = shared_region->sr_mem_entry;
	sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
	sr_map = sr_mem_entry->backing.map;
	assert(sr_mem_entry->is_sub_map);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: vm_map(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(sr_map)));
	return sr_map;
}
uint32_t
vm_shared_region_get_slide(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> vm_shared_region_get_slide(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: vm_shared_region_get_slide(%p) <- %u\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_slide_info.slide));

	/* 0 if we haven't slid */
	assert(shared_region->sr_slide_info.slide_object != NULL ||
	       shared_region->sr_slide_info.slide == 0);

	return shared_region->sr_slide_info.slide;
}
vm_shared_region_slide_info_t
vm_shared_region_get_slide_info(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> vm_shared_region_get_slide_info(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info)));
	return &shared_region->sr_slide_info;
}
/*
 * Set the shared region the process should use.
 * A NULL new shared region means that we just want to release the old
 * shared region.
 * The caller should already have an extra reference on the new shared region
 * (if any).  We release a reference on the old shared region (if any).
 */
void
vm_shared_region_set(
	task_t			task,
	vm_shared_region_t	new_shared_region)
{
	vm_shared_region_t	old_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> set(%p, %p)\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(new_shared_region)));

	task_lock(task);
	vm_shared_region_lock();

	old_shared_region = task->shared_region;
	if (new_shared_region) {
		assert(new_shared_region->sr_ref_count > 0);
	}

	task->shared_region = new_shared_region;

	vm_shared_region_unlock();
	task_unlock(task);

	if (old_shared_region) {
		assert(old_shared_region->sr_ref_count > 0);
		vm_shared_region_deallocate(old_shared_region);
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: set(%p) <- old=%p new=%p\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(old_shared_region),
		 (void *)VM_KERNEL_ADDRPERM(new_shared_region)));
}
/*
 * Look up the shared region for the desired environment.
 * If none is found, create a new (empty) one.
 * Grab an extra reference on the returned shared region, to make sure
 * it doesn't get destroyed before the caller is done with it.  The caller
 * is responsible for consuming that extra reference if necessary.
 */
vm_shared_region_t
vm_shared_region_lookup(
	void		*root_dir,
	cpu_type_t	cputype,
	cpu_subtype_t	cpu_subtype,
	boolean_t	is_64bit)
{
	vm_shared_region_t	shared_region;
	vm_shared_region_t	new_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> lookup(root=%p,cpu=<%d,%d>,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir),
		 cputype, cpu_subtype, is_64bit));

	shared_region = NULL;
	new_shared_region = NULL;

	vm_shared_region_lock();
	for (;;) {
		queue_iterate(&vm_shared_region_queue,
			      shared_region,
			      vm_shared_region_t,
			      sr_q) {
			assert(shared_region->sr_ref_count > 0);
			if (shared_region->sr_cpu_type == cputype &&
			    shared_region->sr_cpu_subtype == cpu_subtype &&
			    shared_region->sr_root_dir == root_dir &&
			    shared_region->sr_64bit == is_64bit) {
				/* found a match ! */
				vm_shared_region_reference_locked(shared_region);
				goto done;
			}
		}
		if (new_shared_region == NULL) {
			/* no match: create a new one */
			vm_shared_region_unlock();
			new_shared_region = vm_shared_region_create(root_dir,
								    cputype,
								    cpu_subtype,
								    is_64bit);
			/* do the lookup again, in case we lost a race */
			vm_shared_region_lock();
		} else {
			/* still no match: use our new one */
			shared_region = new_shared_region;
			new_shared_region = NULL;
			queue_enter(&vm_shared_region_queue,
				    shared_region,
				    vm_shared_region_t,
				    sr_q);
			break;
		}
	}

done:
	vm_shared_region_unlock();

	if (new_shared_region) {
		/*
		 * We lost a race with someone else to create a new shared
		 * region for that environment.  Get rid of our unused one.
		 */
		assert(new_shared_region->sr_ref_count == 1);
		new_shared_region->sr_ref_count--;
		vm_shared_region_destroy(new_shared_region);
		new_shared_region = NULL;
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: lookup(root=%p,cpu=<%d,%d>,64bit=%d) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir),
		 cputype, cpu_subtype, is_64bit,
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	assert(shared_region->sr_ref_count > 0);
	return shared_region;
}
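/*
 * Usage sketch (illustration only): the exec path picks the environment
 * tuple and hands the resulting region to the task, e.g.:
 *
 *	vm_shared_region_t sr;
 *
 *	sr = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit);
 *	if (sr != NULL) {
 *		vm_shared_region_set(task, sr);	// task now owns the reference
 *	}
 *
 * vm_shared_region_enter() below is the real consumer of this pattern.
 */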
/*
 * Take an extra reference on a shared region.
 * The vm_shared_region_lock should already be held by the caller.
 */
static void
vm_shared_region_reference_locked(
	vm_shared_region_t	shared_region)
{
	LCK_MTX_ASSERT(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> reference_locked(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count++;

	if (shared_region->sr_timer_call != NULL) {
		boolean_t cancelled;

		/* cancel and free any pending timeout */
		cancelled = thread_call_cancel(shared_region->sr_timer_call);
		if (cancelled) {
			thread_call_free(shared_region->sr_timer_call);
			shared_region->sr_timer_call = NULL;
			/* release the reference held by the cancelled timer */
			shared_region->sr_ref_count--;
		} else {
			/* the timer will drop the reference and free itself */
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: reference_locked(%p) <- %d\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_ref_count));
}
/*
 * Release a reference on the shared region.
 * Destroy it if there are no references left.
 */
void
vm_shared_region_deallocate(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> deallocate(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	vm_shared_region_lock();

	assert(shared_region->sr_ref_count > 0);

	if (shared_region->sr_root_dir == NULL) {
		/*
		 * Local (i.e. based on the boot volume) shared regions
		 * can persist or not based on the "shared_region_persistence"
		 * sysctl.
		 * Make sure that this one complies.
		 *
		 * See comments in vm_shared_region_slide() for notes about
		 * shared regions we have slid (which are not torn down currently).
		 */
		if (shared_region_persistence &&
		    !shared_region->sr_persists) {
			/* make this one persistent */
			shared_region->sr_ref_count++;
			shared_region->sr_persists = TRUE;
		} else if (!shared_region_persistence &&
			   shared_region->sr_persists) {
			/* make this one no longer persistent */
			assert(shared_region->sr_ref_count > 1);
			shared_region->sr_ref_count--;
			shared_region->sr_persists = FALSE;
		}
	}

	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count--;
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p): ref now %d\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_ref_count));

	if (shared_region->sr_ref_count == 0) {
		uint64_t deadline;

		assert(!shared_region->sr_slid);

		if (shared_region->sr_timer_call == NULL) {
			/* hold one reference for the timer */
			assert(!shared_region->sr_mapping_in_progress);
			shared_region->sr_ref_count++;

			/* set up the timer */
			shared_region->sr_timer_call = thread_call_allocate(
				(thread_call_func_t) vm_shared_region_timeout,
				(thread_call_param_t) shared_region);

			/* schedule the timer */
			clock_interval_to_deadline(shared_region_destroy_delay,
						   1000 * 1000 * 1000,
						   &deadline);
			thread_call_enter_delayed(shared_region->sr_timer_call,
						  deadline);

			SHARED_REGION_TRACE_DEBUG(
				("shared_region: deallocate(%p): armed timer\n",
				 (void *)VM_KERNEL_ADDRPERM(shared_region)));

			vm_shared_region_unlock();
		} else {
			/* timer expired: let go of this shared region */

			/*
			 * We can't properly handle teardown of a slid object today.
			 */
			assert(!shared_region->sr_slid);

			/*
			 * Remove it from the queue first, so no one can find
			 * it...
			 */
			queue_remove(&vm_shared_region_queue,
				     shared_region,
				     vm_shared_region_t,
				     sr_q);
			vm_shared_region_unlock();

			/* ... and destroy it */
			vm_shared_region_destroy(shared_region);
			shared_region = NULL;
		}
	} else {
		vm_shared_region_unlock();
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p) <-\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
}
static void
vm_shared_region_timeout(
	thread_call_param_t	param0,
	__unused thread_call_param_t	param1)
{
	vm_shared_region_t	shared_region;

	shared_region = (vm_shared_region_t) param0;

	vm_shared_region_deallocate(shared_region);
}
/*
 * Create a new (empty) shared region for a new environment.
 */
static vm_shared_region_t
vm_shared_region_create(
	void			*root_dir,
	cpu_type_t		cputype,
	cpu_subtype_t		cpu_subtype,
	boolean_t		is_64bit)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	ipc_port_t		mem_entry_port;
	vm_shared_region_t	shared_region;
	vm_shared_region_slide_info_t si;
	vm_map_t		sub_map;
	mach_vm_offset_t	base_address, pmap_nesting_start;
	mach_vm_size_t		size, pmap_nesting_size;

	SHARED_REGION_TRACE_INFO(
		("shared_region: -> create(root=%p,cpu=<%d,%d>,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir),
		 cputype, cpu_subtype, is_64bit));

	base_address = 0;
	size = 0;
	mem_entry = NULL;
	mem_entry_port = IPC_PORT_NULL;
	sub_map = VM_MAP_NULL;

	/* create a new shared region structure... */
	shared_region = kalloc(sizeof(*shared_region));
	if (shared_region == NULL) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: couldn't allocate\n"));
		goto done;
	}
	/* figure out the correct settings for the desired environment */
	if (is_64bit) {
		switch (cputype) {
#if defined(__arm64__)
		case CPU_TYPE_ARM64:
			base_address = SHARED_REGION_BASE_ARM64;
			size = SHARED_REGION_SIZE_ARM64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM64;
			break;
#elif !defined(__arm__)
		case CPU_TYPE_X86_64:
			base_address = SHARED_REGION_BASE_X86_64;
			size = SHARED_REGION_SIZE_X86_64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC64;
			size = SHARED_REGION_SIZE_PPC64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
			break;
#endif
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				 cputype));
			kfree(shared_region, sizeof(*shared_region));
			shared_region = NULL;
			goto done;
		}
	} else {
		switch (cputype) {
#if defined(__arm__) || defined(__arm64__)
		case CPU_TYPE_ARM:
			base_address = SHARED_REGION_BASE_ARM;
			size = SHARED_REGION_SIZE_ARM;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
			break;
#else
		case CPU_TYPE_I386:
			base_address = SHARED_REGION_BASE_I386;
			size = SHARED_REGION_SIZE_I386;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC;
			size = SHARED_REGION_SIZE_PPC;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
			break;
#endif
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				 cputype));
			kfree(shared_region, sizeof(*shared_region));
			shared_region = NULL;
			goto done;
		}
	}
	/* create a memory entry structure and a Mach port handle */
	kr = mach_memory_entry_allocate(&mem_entry,
					&mem_entry_port);
	if (kr != KERN_SUCCESS) {
		kfree(shared_region, sizeof(*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: "
			 "couldn't allocate mem_entry\n"));
		goto done;
	}

#if defined(__arm__) || defined(__arm64__)
	{
		struct pmap *pmap_nested;

		pmap_nested = pmap_create(NULL, 0, is_64bit);
		if (pmap_nested != PMAP_NULL) {
			pmap_set_nested(pmap_nested);
			sub_map = vm_map_create(pmap_nested, 0, size, TRUE);
#if defined(__arm64__)
			if (is_64bit ||
			    page_shift_user32 == SIXTEENK_PAGE_SHIFT) {
				/* enforce 16KB alignment of VM map entries */
				vm_map_set_page_shift(sub_map,
						      SIXTEENK_PAGE_SHIFT);
			}
#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
			/* enforce 16KB alignment for watch targets with new ABI */
			vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT);
#endif /* __arm64__ */
		} else {
			sub_map = VM_MAP_NULL;
		}
	}
#else
	/* create a VM sub map and its pmap */
	sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit),
				0, size,
				TRUE);
#endif
	if (sub_map == VM_MAP_NULL) {
		ipc_port_release_send(mem_entry_port);
		kfree(shared_region, sizeof(*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: "
			 "couldn't allocate map\n"));
		goto done;
	}

	assert(!sub_map->disable_vmentry_reuse);
	sub_map->is_nested_map = TRUE;
	/* make the memory entry point to the VM sub map */
	mem_entry->is_sub_map = TRUE;
	mem_entry->backing.map = sub_map;
	mem_entry->size = size;
	mem_entry->protection = VM_PROT_ALL;

	/* make the shared region point at the memory entry */
	shared_region->sr_mem_entry = mem_entry_port;

	/* fill in the shared region's environment and settings */
	shared_region->sr_base_address = base_address;
	shared_region->sr_size = size;
	shared_region->sr_pmap_nesting_start = pmap_nesting_start;
	shared_region->sr_pmap_nesting_size = pmap_nesting_size;
	shared_region->sr_cpu_type = cputype;
	shared_region->sr_cpu_subtype = cpu_subtype;
	shared_region->sr_64bit = is_64bit;
	shared_region->sr_root_dir = root_dir;

	queue_init(&shared_region->sr_q);
	shared_region->sr_mapping_in_progress = FALSE;
	shared_region->sr_slide_in_progress = FALSE;
	shared_region->sr_persists = FALSE;
	shared_region->sr_slid = FALSE;
	shared_region->sr_timer_call = NULL;
	shared_region->sr_first_mapping = (mach_vm_offset_t) -1;

	/* grab a reference for the caller */
	shared_region->sr_ref_count = 1;

	/* And set up slide info */
	si = &shared_region->sr_slide_info;
	si->start = 0;
	si->end = 0;
	si->slide = 0;
	si->slide_object = NULL;
	si->slide_info_size = 0;
	si->slide_info_entry = NULL;

	/* Initialize UUID and other metadata */
	memset(&shared_region->sr_uuid, '\0', sizeof(shared_region->sr_uuid));
	shared_region->sr_uuid_copied = FALSE;
	shared_region->sr_images_count = 0;
	shared_region->sr_images = NULL;
done:
	if (shared_region) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,"
			 "base=0x%llx,size=0x%llx) <- "
			 "%p mem=(%p,%p) map=%p pmap=%p\n",
			 (void *)VM_KERNEL_ADDRPERM(root_dir),
			 cputype, cpu_subtype, is_64bit,
			 (long long)base_address,
			 (long long)size,
			 (void *)VM_KERNEL_ADDRPERM(shared_region),
			 (void *)VM_KERNEL_ADDRPERM(mem_entry_port),
			 (void *)VM_KERNEL_ADDRPERM(mem_entry),
			 (void *)VM_KERNEL_ADDRPERM(sub_map),
			 (void *)VM_KERNEL_ADDRPERM(sub_map->pmap)));
	} else {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,"
			 "base=0x%llx,size=0x%llx) <- NULL",
			 (void *)VM_KERNEL_ADDRPERM(root_dir),
			 cputype, cpu_subtype, is_64bit,
			 (long long)base_address,
			 (long long)size));
	}
	return shared_region;
}
/*
 * Destroy a now-unused shared region.
 * The shared region is no longer in the queue and can not be looked up.
 */
static void
vm_shared_region_destroy(
	vm_shared_region_t	shared_region)
{
	vm_named_entry_t	mem_entry;
	vm_map_t		map;

	SHARED_REGION_TRACE_INFO(
		("shared_region: -> destroy(%p) (root=%p,cpu=<%d,%d>,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir),
		 shared_region->sr_cpu_type,
		 shared_region->sr_cpu_subtype,
		 shared_region->sr_64bit));

	assert(shared_region->sr_ref_count == 0);
	assert(!shared_region->sr_persists);
	assert(!shared_region->sr_slid);

	mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
	assert(mem_entry->is_sub_map);
	assert(!mem_entry->internal);
	assert(!mem_entry->is_copy);
	map = mem_entry->backing.map;

	/*
	 * Clean up the pmap first.  The virtual addresses that were
	 * entered in this possibly "nested" pmap may have different values
	 * than the VM map's min and max offsets, if the VM sub map was
	 * mapped at a non-zero offset in the processes' main VM maps, which
	 * is usually the case, so the clean-up we do in vm_map_destroy() would
	 * not be enough here.
	 */
	if (map->pmap) {
		pmap_remove(map->pmap,
			    shared_region->sr_base_address,
			    (shared_region->sr_base_address +
			     shared_region->sr_size));
	}

	/*
	 * Release our (one and only) handle on the memory entry.
	 * This will generate a no-senders notification, which will be processed
	 * by ipc_kobject_notify(), which will release the one and only
	 * reference on the memory entry and cause it to be destroyed, along
	 * with the VM sub map and its pmap.
	 */
	mach_memory_entry_port_release(shared_region->sr_mem_entry);
	shared_region->sr_mem_entry = IPC_PORT_NULL;

	if (shared_region->sr_timer_call) {
		thread_call_free(shared_region->sr_timer_call);
	}

#if 0
	/*
	 * If slid, free those resources.  We'll want this eventually,
	 * but can't handle it properly today.
	 */
	vm_shared_region_slide_info_t si;

	si = &shared_region->sr_slide_info;
	if (si->slide_info_entry) {
		kmem_free(kernel_map,
			  (vm_offset_t) si->slide_info_entry,
			  (vm_size_t) si->slide_info_size);
		vm_object_deallocate(si->slide_object);
	}
#endif

	/* release the shared region structure... */
	kfree(shared_region, sizeof(*shared_region));

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: destroy(%p) <-\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	shared_region = NULL;
}
/*
 * Gets the address of the first (in time) mapping in the shared region.
 */
kern_return_t
vm_shared_region_start_address(
	vm_shared_region_t	shared_region,
	mach_vm_offset_t	*start_address)
{
	kern_return_t		kr;
	mach_vm_offset_t	sr_base_address;
	mach_vm_offset_t	sr_first_mapping;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> start_address(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);

	vm_shared_region_lock();

	/*
	 * Wait if there's another thread establishing a mapping
	 * in this shared region right when we're looking at it.
	 * We want a consistent view of the map...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		assert(shared_region->sr_ref_count > 1);
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
				       THREAD_UNINT);
	}
	assert(!shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);

	sr_base_address = shared_region->sr_base_address;
	sr_first_mapping = shared_region->sr_first_mapping;

	if (sr_first_mapping == (mach_vm_offset_t) -1) {
		/* shared region is empty */
		kr = KERN_INVALID_ADDRESS;
	} else {
		kr = KERN_SUCCESS;
		*start_address = sr_base_address + sr_first_mapping;
	}

	vm_shared_region_unlock();

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: start_address(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_base_address));

	return kr;
}
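/*
 * Illustrative sketch (assumption: the actual syscall handler lives in the
 * BSD layer, e.g. shared_region_check_np()): a caller typically combines
 * the routines above like this to report the shared cache address to dyld:
 *
 *	vm_shared_region_t	sr;
 *	mach_vm_offset_t	start_address;
 *	kern_return_t		kr;
 *
 *	sr = vm_shared_region_get(current_task());
 *	if (sr != NULL) {
 *		kr = vm_shared_region_start_address(sr, &start_address);
 *		if (kr == KERN_SUCCESS) {
 *			// copy "start_address" out to dyld's user buffer
 *		}
 *		vm_shared_region_deallocate(sr);
 *	}
 */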
void
vm_shared_region_undo_mappings(
	vm_map_t			sr_map,
	mach_vm_offset_t		sr_base_address,
	struct shared_file_mapping_np	*mappings,
	unsigned int			mappings_count)
{
	unsigned int		j = 0;
	vm_shared_region_t	shared_region = NULL;
	boolean_t		reset_shared_region_state = FALSE;

	shared_region = vm_shared_region_get(current_task());
	if (shared_region == NULL) {
		printf("Failed to undo mappings because of NULL shared region.\n");
		return;
	}

	if (sr_map == NULL) {
		ipc_port_t		sr_handle;
		vm_named_entry_t	sr_mem_entry;

		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);

		while (shared_region->sr_mapping_in_progress) {
			/* wait for our turn... */
			vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
					       THREAD_UNINT);
		}
		assert(!shared_region->sr_mapping_in_progress);
		assert(shared_region->sr_ref_count > 1);
		/* let others know we're working in this shared region */
		shared_region->sr_mapping_in_progress = TRUE;

		vm_shared_region_unlock();

		reset_shared_region_state = TRUE;

		/* no need to lock because this data is never modified... */
		sr_handle = shared_region->sr_mem_entry;
		sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
		sr_map = sr_mem_entry->backing.map;
		sr_base_address = shared_region->sr_base_address;
	}
	/*
	 * Undo the mappings we've established so far.
	 */
	for (j = 0; j < mappings_count; j++) {
		kern_return_t kr2;

		if (mappings[j].sfm_size == 0) {
			/*
			 * We didn't establish this
			 * mapping, so nothing to undo.
			 */
			continue;
		}
		SHARED_REGION_TRACE_INFO(
			("shared_region: mapping[%d]: "
			 "address:0x%016llx "
			 "size:0x%016llx "
			 "offset:0x%016llx "
			 "maxprot:0x%x prot:0x%x: "
			 "undoing...\n",
			 j,
			 (long long)mappings[j].sfm_address,
			 (long long)mappings[j].sfm_size,
			 (long long)mappings[j].sfm_file_offset,
			 mappings[j].sfm_max_prot,
			 mappings[j].sfm_init_prot));
		kr2 = mach_vm_deallocate(
			sr_map,
			(mappings[j].sfm_address -
			 sr_base_address),
			mappings[j].sfm_size);
		assert(kr2 == KERN_SUCCESS);
	}

	if (reset_shared_region_state) {
		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);
		assert(shared_region->sr_mapping_in_progress);
		/* we're done working on that shared region */
		shared_region->sr_mapping_in_progress = FALSE;
		thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
		vm_shared_region_unlock();
		reset_shared_region_state = FALSE;
	}

	vm_shared_region_deallocate(shared_region);
}
/*
 * Establish some mappings of a file in the shared region.
 * This is used by "dyld" via the shared_region_map_np() system call
 * to populate the shared region with the appropriate shared cache.
 *
 * One could also call it several times to incrementally load several
 * libraries, as long as they do not overlap.
 * It will return KERN_SUCCESS if the mappings were successfully established
 * or if they were already established identically by another process.
 */
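/*
 * Illustrative sketch (field values invented for the example; "sr_base" is a
 * hypothetical placeholder for the shared region base address): the
 * "mappings" array handed to vm_shared_region_map_file() typically describes
 * a read/execute TEXT portion of the shared cache followed by a writable
 * DATA portion, e.g.:
 *
 *	struct shared_file_mapping_np mappings[2] = {
 *		{ .sfm_address     = sr_base,
 *		  .sfm_size        = 0x20000000,
 *		  .sfm_file_offset = 0,
 *		  .sfm_max_prot    = VM_PROT_READ | VM_PROT_EXECUTE,
 *		  .sfm_init_prot   = VM_PROT_READ | VM_PROT_EXECUTE },
 *		{ .sfm_address     = sr_base + 0x20000000,
 *		  .sfm_size        = 0x04000000,
 *		  .sfm_file_offset = 0x20000000,
 *		  .sfm_max_prot    = VM_PROT_READ | VM_PROT_WRITE,
 *		  .sfm_init_prot   = VM_PROT_READ | VM_PROT_WRITE },
 *	};
 *
 * A mapping whose sfm_init_prot includes VM_PROT_ZF is backed by anonymous
 * zero-filled memory instead of the cache file, and VM_PROT_SLIDE marks the
 * (single) mapping that the slide info applies to, as handled below.
 */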
kern_return_t
vm_shared_region_map_file(
	vm_shared_region_t		shared_region,
	unsigned int			mappings_count,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	void				*root_dir,
	uint32_t			slide,
	user_addr_t			slide_start,
	user_addr_t			slide_size)
{
	kern_return_t		kr;
	vm_object_t		file_object;
	ipc_port_t		sr_handle;
	vm_named_entry_t	sr_mem_entry;
	vm_map_t		sr_map;
	mach_vm_offset_t	sr_base_address;
	unsigned int		i;
	mach_port_t		map_port;
	vm_map_offset_t		target_address;
	vm_object_t		object;
	vm_object_size_t	obj_size;
	struct shared_file_mapping_np	*mapping_to_slide = NULL;
	mach_vm_offset_t	first_mapping = (mach_vm_offset_t) -1;
	mach_vm_offset_t	slid_mapping = (mach_vm_offset_t) -1;
	vm_map_offset_t		lowest_unnestable_addr = 0;
	vm_map_kernel_flags_t	vmk_flags;
	mach_vm_offset_t	sfm_min_address = ~0;
	mach_vm_offset_t	sfm_max_address = 0;
	struct _dyld_cache_header sr_cache_header;
#if __arm64__
	if ((shared_region->sr_64bit ||
	     page_shift_user32 == SIXTEENK_PAGE_SHIFT) &&
	    ((slide & SIXTEENK_PAGE_MASK) != 0)) {
		printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n",
		       __FUNCTION__, slide);
		kr = KERN_INVALID_ARGUMENT;
		goto done;
	}
#endif /* __arm64__ */
	kr = KERN_SUCCESS;

	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);

	if (shared_region->sr_root_dir != root_dir) {
		/*
		 * This shared region doesn't match the current root
		 * directory of this process.  Deny the mapping to
		 * avoid tainting the shared region with something that
		 * doesn't quite belong into it.
		 */
		vm_shared_region_unlock();
		kr = KERN_PROTECTION_FAILURE;
		goto done;
	}

	/*
	 * Make sure we handle only one mapping at a time in a given
	 * shared region, to avoid race conditions.  This should not
	 * happen frequently...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
				       THREAD_UNINT);
	}
	assert(!shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);
	/* let others know we're working in this shared region */
	shared_region->sr_mapping_in_progress = TRUE;

	vm_shared_region_unlock();
	/* no need to lock because this data is never modified... */
	sr_handle = shared_region->sr_mem_entry;
	sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
	sr_map = sr_mem_entry->backing.map;
	sr_base_address = shared_region->sr_base_address;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
		 (void *)VM_KERNEL_ADDRPERM(mappings),
		 (void *)VM_KERNEL_ADDRPERM(file_control), file_size));

	/* get the VM object associated with the file to be mapped */
	file_object = memory_object_control_to_vm_object(file_control);

	assert(file_object);
	/* establish the mappings */
	for (i = 0; i < mappings_count; i++) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: mapping[%d]: "
			 "address:0x%016llx size:0x%016llx offset:0x%016llx "
			 "maxprot:0x%x prot:0x%x\n",
			 i,
			 (long long)mappings[i].sfm_address,
			 (long long)mappings[i].sfm_size,
			 (long long)mappings[i].sfm_file_offset,
			 mappings[i].sfm_max_prot,
			 mappings[i].sfm_init_prot));

		if (mappings[i].sfm_address < sfm_min_address) {
			sfm_min_address = mappings[i].sfm_address;
		}

		if ((mappings[i].sfm_address + mappings[i].sfm_size) > sfm_max_address) {
			sfm_max_address = mappings[i].sfm_address + mappings[i].sfm_size;
		}

		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			/* zero-filled memory */
			map_port = MACH_PORT_NULL;
		} else {
			/* file-backed memory */
			__IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager);
		}

		if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) {
			/*
			 * This is the mapping that needs to be slid.
			 */
			if (mapping_to_slide != NULL) {
				SHARED_REGION_TRACE_INFO(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x "
					 "will not be slid as only one such mapping is allowed...\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot));
			} else {
				mapping_to_slide = &mappings[i];
			}
		}

		/* mapping's address is relative to the shared region base */
		target_address =
			mappings[i].sfm_address - sr_base_address;

		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		vmk_flags.vmkf_already = TRUE;

		/* establish that mapping, OK if it's "already" there */
		if (map_port == MACH_PORT_NULL) {
			/*
			 * We want to map some anonymous memory in a
			 * shared region.
			 * We have to create the VM object now, so that it
			 * can be mapped "copy-on-write".
			 */
			obj_size = vm_map_round_page(mappings[i].sfm_size,
						     VM_MAP_PAGE_MASK(sr_map));
			object = vm_object_allocate(obj_size);
			if (object == VM_OBJECT_NULL) {
				kr = KERN_RESOURCE_SHORTAGE;
			} else {
				kr = vm_map_enter(
					sr_map,
					&target_address,
					vm_map_round_page(mappings[i].sfm_size,
							  VM_MAP_PAGE_MASK(sr_map)),
					0,
					VM_FLAGS_FIXED,
					vmk_flags,
					VM_KERN_MEMORY_NONE,
					object,
					0,
					TRUE,
					mappings[i].sfm_init_prot & VM_PROT_ALL,
					mappings[i].sfm_max_prot & VM_PROT_ALL,
					VM_INHERIT_DEFAULT);
			}
		} else {
			object = VM_OBJECT_NULL; /* no anonymous memory here */
			kr = vm_map_enter_mem_object(
				sr_map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size,
						  VM_MAP_PAGE_MASK(sr_map)),
				0,
				VM_FLAGS_FIXED,
				vmk_flags,
				VM_KERN_MEMORY_NONE,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				mappings[i].sfm_init_prot & VM_PROT_ALL,
				mappings[i].sfm_max_prot & VM_PROT_ALL,
				VM_INHERIT_DEFAULT);
		}

		if (kr == KERN_SUCCESS) {
			/*
			 * Record the first (chronologically) successful
			 * mapping in this shared region.
			 * We're protected by "sr_mapping_in_progress" here,
			 * so no need to lock "shared_region".
			 */
			if (first_mapping == (mach_vm_offset_t) -1) {
				first_mapping = target_address;
			}

			if ((slid_mapping == (mach_vm_offset_t) -1) &&
			    (mapping_to_slide == &mappings[i])) {
				slid_mapping = target_address;
			}

			/*
			 * Record the lowest writable address in this
			 * sub map, to log any unexpected unnesting below
			 * that address (see log_unnest_badness()).
			 */
			if ((mappings[i].sfm_init_prot & VM_PROT_WRITE) &&
			    sr_map->is_nested_map &&
			    (lowest_unnestable_addr == 0 ||
			     (target_address < lowest_unnestable_addr))) {
				lowest_unnestable_addr = target_address;
			}
		} else {
			if (map_port == MACH_PORT_NULL) {
				/*
				 * Get rid of the VM object we just created
				 * but failed to map.
				 */
				vm_object_deallocate(object);
				object = VM_OBJECT_NULL;
			}
			if (kr == KERN_MEMORY_PRESENT) {
				/*
				 * This exact mapping was already there:
				 * that's fine.
				 */
				SHARED_REGION_TRACE_INFO(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x "
					 "already mapped...\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot));
				/*
				 * We didn't establish this mapping ourselves;
				 * let's reset its size, so that we do not
				 * attempt to undo it if an error occurs later.
				 */
				mappings[i].sfm_size = 0;
				kr = KERN_SUCCESS;
			} else {
				/* this mapping failed ! */
				SHARED_REGION_TRACE_ERROR(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x failed 0x%x\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot,
					 kr));

				vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i);
				break;
			}
		}
	}
	if (kr == KERN_SUCCESS &&
	    slide_size != 0 &&
	    mapping_to_slide != NULL) {
		kr = vm_shared_region_slide(slide,
					    mapping_to_slide->sfm_file_offset,
					    mapping_to_slide->sfm_size,
					    slide_start,
					    slide_size,
					    slid_mapping,
					    file_control);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: region_slide("
				 "slide:0x%x start:0x%016llx "
				 "size:0x%016llx) failed 0x%x\n",
				 slide,
				 (long long)slide_start,
				 (long long)slide_size,
				 kr));
			vm_shared_region_undo_mappings(sr_map,
						       sr_base_address,
						       mappings,
						       mappings_count);
		}
	}

	if (kr == KERN_SUCCESS) {
		/* adjust the map's "lowest_unnestable_start" */
		lowest_unnestable_addr &= ~(pmap_nesting_size_min - 1);
		if (lowest_unnestable_addr !=
		    sr_map->lowest_unnestable_start) {
			vm_map_lock(sr_map);
			sr_map->lowest_unnestable_start =
				lowest_unnestable_addr;
			vm_map_unlock(sr_map);
		}
	}
	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);
	assert(shared_region->sr_mapping_in_progress);

	/* set "sr_first_mapping"; dyld uses it to validate the shared cache */
	if (kr == KERN_SUCCESS &&
	    shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
		shared_region->sr_first_mapping = first_mapping;
	}

	/*
	 * copy in the shared region UUID to the shared region structure.
	 * we do this indirectly by first copying in the shared cache header
	 * and then copying the UUID from there because we'll need to look
	 * at other content from the shared cache header.
	 */
	if (kr == KERN_SUCCESS && !shared_region->sr_uuid_copied) {
		int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping),
				   (char *)&sr_cache_header,
				   sizeof(sr_cache_header));
		if (error == 0) {
			memcpy(&shared_region->sr_uuid, &sr_cache_header.uuid, sizeof(shared_region->sr_uuid));
			shared_region->sr_uuid_copied = TRUE;
		} else {
#if DEVELOPMENT || DEBUG
			panic("shared_region: copyin shared_cache_header(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
			      "offset:0 size:0x%016llx) failed with %d\n",
			      (long long)shared_region->sr_base_address,
			      (long long)shared_region->sr_first_mapping,
			      (long long)sizeof(sr_cache_header),
			      error);
#endif /* DEVELOPMENT || DEBUG */
			shared_region->sr_uuid_copied = FALSE;
		}
	}
	/*
	 * If the shared cache is associated with the init task (and is therefore the system shared cache),
	 * check whether it is a custom built shared cache and copy in the shared cache layout accordingly.
	 */
	boolean_t is_init_task = (task_pid(current_task()) == 1);
	if (shared_region->sr_uuid_copied && is_init_task) {
		/* Copy in the shared cache layout if we're running with a locally built shared cache */
		if (sr_cache_header.locallyBuiltCache) {
			KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_START);
			size_t image_array_length = (sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info));
			struct _dyld_cache_image_text_info *sr_image_layout = kalloc(image_array_length);
			int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping +
					    sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length);
			if (error == 0) {
				shared_region->sr_images = kalloc(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64));
				for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) {
					memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid,
					       sizeof(shared_region->sr_images[index].imageUUID));
					shared_region->sr_images[index].imageLoadAddress = sr_image_layout[index].loadAddress;
				}

				assert(sr_cache_header.imagesTextCount < UINT32_MAX);
				shared_region->sr_images_count = (uint32_t) sr_cache_header.imagesTextCount;
			} else {
#if DEVELOPMENT || DEBUG
				panic("shared_region: copyin shared_cache_layout(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
				      "offset:0x%016llx size:0x%016llx) failed with %d\n",
				      (long long)shared_region->sr_base_address,
				      (long long)shared_region->sr_first_mapping,
				      (long long)sr_cache_header.imagesTextOffset,
				      (long long)image_array_length,
				      error);
#endif /* DEVELOPMENT || DEBUG */
			}
			KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count);
			kfree(sr_image_layout, image_array_length);
			sr_image_layout = NULL;
		}
		init_task_shared_region = shared_region;
	}
	if (kr == KERN_SUCCESS) {
		/*
		 * If we succeeded, we know the bounds of the shared region.
		 * Trim our pmaps to only cover this range (if applicable to
		 * this platform).
		 */
		pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_min_address, sfm_max_address - sfm_min_address);
	}

	/* we're done working on that shared region */
	shared_region->sr_mapping_in_progress = FALSE;
	thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
	vm_shared_region_unlock();

done:
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
		 (void *)VM_KERNEL_ADDRPERM(mappings),
		 (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr));
	return kr;
}
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 *
 * This also tries to trim the pmap for the shared region.
 */
vm_shared_region_t
vm_shared_region_trim_and_get(task_t task)
{
	vm_shared_region_t	shared_region;
	ipc_port_t		sr_handle;
	vm_named_entry_t	sr_mem_entry;
	vm_map_t		sr_map;

	/* Get the shared region and the map. */
	shared_region = vm_shared_region_get(task);
	if (shared_region == NULL) {
		return NULL;
	}

	sr_handle = shared_region->sr_mem_entry;
	sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
	sr_map = sr_mem_entry->backing.map;

	/* Trim the pmap if possible. */
	pmap_trim(task->map->pmap, sr_map->pmap, 0, 0, 0);

	return shared_region;
}
/*
 * Enter the appropriate shared region into "map" for "task".
 * This involves looking up the shared region (and possibly creating a new
 * one) for the desired environment, then mapping the VM sub map into the
 * task's VM "map", with the appropriate level of pmap-nesting.
 */
kern_return_t
vm_shared_region_enter(
	struct _vm_map		*map,
	struct task		*task,
	boolean_t		is_64bit,
	void			*fsroot,
	cpu_type_t		cpu,
	cpu_subtype_t		cpu_subtype)
{
	kern_return_t		kr;
	vm_shared_region_t	shared_region;
	vm_map_offset_t		sr_address, sr_offset, target_address;
	vm_map_size_t		sr_size, mapping_size;
	vm_map_offset_t		sr_pmap_nesting_start;
	vm_map_size_t		sr_pmap_nesting_size;
	ipc_port_t		sr_handle;
	vm_prot_t		cur_prot, max_prot;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> "
		 "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(fsroot),
		 cpu, cpu_subtype, is_64bit));

	/* lookup (create if needed) the shared region for this environment */
	shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit);
	if (shared_region == NULL) {
		/* this should not happen ! */
		SHARED_REGION_TRACE_ERROR(
			("shared_region: -> "
			 "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d): "
			 "lookup failed !\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot),
			 cpu, cpu_subtype, is_64bit));
		//panic("shared_region_enter: lookup failed\n");
		return KERN_FAILURE;
	}

	kr = KERN_SUCCESS;
	/* no need to lock since this data is never modified */
	sr_address = shared_region->sr_base_address;
	sr_size = shared_region->sr_size;
	sr_handle = shared_region->sr_mem_entry;
	sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start;
	sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size;

	cur_prot = VM_PROT_READ;
#if __x86_64__
	/*
	 * XXX BINARY COMPATIBILITY
	 * java6 apparently needs to modify some code in the
	 * dyld shared cache and needs to be allowed to add
	 * write access...
	 */
	max_prot = VM_PROT_ALL;
#else /* __x86_64__ */
	max_prot = VM_PROT_READ;
#endif /* __x86_64__ */
	/*
	 * Start mapping the shared region's VM sub map into the task's VM map.
	 */
	sr_offset = 0;

	if (sr_pmap_nesting_start > sr_address) {
		/* we need to map a range without pmap-nesting first */
		target_address = sr_address;
		mapping_size = sr_pmap_nesting_start - sr_address;
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			VM_MAP_KERNEL_FLAGS_NONE,
			VM_KERN_MEMORY_NONE,
			sr_handle,
			sr_offset,
			TRUE,
			cur_prot,
			max_prot,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, cpu_subtype, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d,%d): "
			 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot),
			 cpu, cpu_subtype, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
		sr_offset += mapping_size;
		sr_size -= mapping_size;
	}
	/*
	 * We may need to map several pmap-nested portions, due to platform
	 * specific restrictions on pmap nesting.
	 * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias...
	 */
	for (;
	     sr_pmap_nesting_size > 0;
	     sr_offset += mapping_size,
	     sr_size -= mapping_size,
	     sr_pmap_nesting_size -= mapping_size) {
		target_address = sr_address + sr_offset;
		mapping_size = sr_pmap_nesting_size;
		if (mapping_size > pmap_nesting_size_max) {
			mapping_size = (vm_map_offset_t) pmap_nesting_size_max;
		}
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			VM_MAP_KERNEL_FLAGS_NONE,
			VM_MEMORY_SHARED_PMAP,
			sr_handle,
			sr_offset,
			TRUE,
			cur_prot,
			max_prot,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, cpu_subtype, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d,%d): "
			 "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot),
			 cpu, cpu_subtype, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
	}
	if (sr_size > 0) {
		/* and there's some left to be mapped without pmap-nesting */
		target_address = sr_address + sr_offset;
		mapping_size = sr_size;
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			VM_MAP_KERNEL_FLAGS_NONE,
			VM_KERN_MEMORY_NONE,
			sr_handle,
			sr_offset,
			TRUE,
			cur_prot,
			max_prot,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, cpu_subtype, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d,%d): "
			 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot),
			 cpu, cpu_subtype, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
		sr_offset += mapping_size;
		sr_size -= mapping_size;
	}
	assert(sr_size == 0);
done:
	if (kr == KERN_SUCCESS) {
		/* let the task use that shared region */
		vm_shared_region_set(task, shared_region);
	} else {
		/* drop our reference since we're not using it */
		vm_shared_region_deallocate(shared_region);
		vm_shared_region_set(task, NULL);
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: enter(%p,%p,%p,%d,%d,%d) <- 0x%x\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(fsroot),
		 cpu, cpu_subtype, is_64bit, kr));
	return kr;
}
#define SANE_SLIDE_INFO_SIZE		(2560*1024)	/* Can be changed if needed */
struct vm_shared_region_slide_info	slide_info;

kern_return_t
vm_shared_region_sliding_valid(uint32_t slide)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_shared_region_t sr = vm_shared_region_get(current_task());

	/* No region yet? we're fine. */
	if (sr == NULL) {
		return kr;
	}

	if ((sr->sr_slid == TRUE) && slide) {
		if (slide != vm_shared_region_get_slide_info(sr)->slide) {
			printf("Only one shared region can be slid\n");
			kr = KERN_FAILURE;
		} else {
			/*
			 * Request for sliding when we've
			 * already done it with exactly the
			 * same slide value before.
			 * This isn't wrong technically but
			 * we don't want to slide again and
			 * so we return this value.
			 */
			kr = KERN_INVALID_ARGUMENT;
		}
	}
	vm_shared_region_deallocate(sr);
	return kr;
}
kern_return_t
vm_shared_region_slide_mapping(
	vm_shared_region_t	sr,
	mach_vm_size_t		slide_info_size,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	mach_vm_offset_t	slid_mapping,
	uint32_t		slide,
	memory_object_control_t	sr_file_control)
{
	kern_return_t		kr;
	vm_object_t		object;
	vm_shared_region_slide_info_t si;
	vm_offset_t		slide_info_entry;
	vm_map_entry_t		slid_entry, tmp_entry;
	struct vm_map_entry	tmp_entry_store;
	memory_object_t		sr_pager;
	vm_map_t		sr_map;
	int			vm_flags;
	vm_map_kernel_flags_t	vmk_flags;
	vm_map_offset_t		map_addr;

	tmp_entry = VM_MAP_ENTRY_NULL;
	sr_pager = MEMORY_OBJECT_NULL;
	object = VM_OBJECT_NULL;
	slide_info_entry = 0;

	assert(sr->sr_slide_in_progress);
	assert(!sr->sr_slid);

	si = vm_shared_region_get_slide_info(sr);
	assert(si->slide_object == VM_OBJECT_NULL);
	assert(si->slide_info_entry == NULL);

	if (sr_file_control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
		printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
		return KERN_FAILURE;
	}

	kr = kmem_alloc(kernel_map,
			(vm_offset_t *) &slide_info_entry,
			(vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	object = memory_object_control_to_vm_object(sr_file_control);
	if (object == VM_OBJECT_NULL || object->internal) {
		object = VM_OBJECT_NULL;
		kr = KERN_INVALID_ADDRESS;
		goto done;
	}

	vm_object_lock(object);
	vm_object_reference_locked(object);	/* for si->slide_object */
	object->object_is_shared_cache = TRUE;
	vm_object_unlock(object);

	si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry;
	si->slide_info_size = slide_info_size;

	assert(slid_mapping != (mach_vm_offset_t) -1);
	si->slid_address = slid_mapping + sr->sr_base_address;
	si->slide_object = object;
	si->start = start;
	si->end = si->start + size;
	si->slide = slide;
	/* find the shared region's map entry to slide */
	sr_map = vm_shared_region_vm_map(sr);
	vm_map_lock_read(sr_map);
	if (!vm_map_lookup_entry(sr_map,
				 slid_mapping,
				 &slid_entry)) {
		/* no mapping there */
		vm_map_unlock(sr_map);
		kr = KERN_INVALID_ARGUMENT;
		goto done;
	}
	/*
	 * We might want to clip the entry to cover only the portion that
	 * needs sliding (offsets si->start to si->end in the shared cache
	 * file at the bottom of the shadow chain).
	 * In practice, it seems to cover the entire DATA segment...
	 */
	tmp_entry_store = *slid_entry;
	tmp_entry = &tmp_entry_store;
	slid_entry = VM_MAP_ENTRY_NULL;
	/* extra ref to keep object alive while map is unlocked */
	vm_object_reference(VME_OBJECT(tmp_entry));
	vm_map_unlock_read(sr_map);

	/* create a "shared_region" sliding pager */
	sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry),
					     VME_OFFSET(tmp_entry),
					     si);
	if (sr_pager == NULL) {
		kr = KERN_RESOURCE_SHORTAGE;
		goto done;
	}

	/* map that pager over the portion of the mapping that needs sliding */
	vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_overwrite_immutable = TRUE;
	map_addr = tmp_entry->vme_start;
	kr = vm_map_enter_mem_object(sr_map,
				     &map_addr,
				     (tmp_entry->vme_end -
				      tmp_entry->vme_start),
				     (mach_vm_offset_t) 0,
				     vm_flags,
				     vmk_flags,
				     VM_KERN_MEMORY_NONE,
				     (ipc_port_t)(uintptr_t) sr_pager,
				     0,
				     TRUE,
				     tmp_entry->protection,
				     tmp_entry->max_protection,
				     tmp_entry->inheritance);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x\n", kr);
	assertf(map_addr == tmp_entry->vme_start,
		"map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n",
		(uint64_t)map_addr,
		(uint64_t) tmp_entry->vme_start,
		tmp_entry);

	/* success! */
	kr = KERN_SUCCESS;

done:
	if (sr_pager) {
		/*
		 * Release the sr_pager reference obtained by
		 * shared_region_pager_setup().
		 * The mapping (if it succeeded) is now holding a reference on
		 * the memory object.
		 */
		memory_object_deallocate(sr_pager);
		sr_pager = MEMORY_OBJECT_NULL;
	}
	if (tmp_entry) {
		/* release extra ref on tmp_entry's VM object */
		vm_object_deallocate(VME_OBJECT(tmp_entry));
		tmp_entry = VM_MAP_ENTRY_NULL;
	}
1994 if (kr
!= KERN_SUCCESS
) {
1996 if (slide_info_entry
) {
1997 kmem_free(kernel_map
, slide_info_entry
, slide_info_size
);
1998 slide_info_entry
= 0;
2000 if (si
->slide_object
) {
2001 vm_object_deallocate(si
->slide_object
);
2002 si
->slide_object
= VM_OBJECT_NULL
;
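/*
 * Summary of vm_shared_region_slide_mapping() above: a kernel buffer
 * (capped at SANE_SLIDE_INFO_SIZE) is allocated to hold the slide info
 * blob that dyld will copy in, a reference is taken on the shared cache's
 * VM object, the slid range and slide amount are recorded in the region's
 * slide info, and a "shared_region" pager is interposed over the mapping,
 * presumably so that pages are rebased as they are paged in rather than
 * all at once.  Any failure after the allocation funnels through "done:",
 * which frees the buffer and drops the object reference.
 */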
void*
vm_shared_region_get_slide_info_entry(vm_shared_region_t sr)
{
	return (void*)sr->sr_slide_info.slide_info_entry;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v1(vm_shared_region_slide_info_entry_v1_t s_info)
{
	uint32_t pageIndex = 0;
	uint16_t entryIndex = 0;
	uint16_t *toc = NULL;

	toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
	for (; pageIndex < s_info->toc_count; pageIndex++) {
		entryIndex = (uint16_t)(toc[pageIndex]);

		if (entryIndex >= s_info->entry_count) {
			printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
			return KERN_FAILURE;
		}
	}
	return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v2(vm_shared_region_slide_info_entry_v2_t s_info, mach_vm_size_t slide_info_size)
{
	if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
		return KERN_FAILURE;
	}

	/* Ensure that the slide info doesn't reference any data outside of its bounds. */

	uint32_t page_starts_count = s_info->page_starts_count;
	uint32_t page_extras_count = s_info->page_extras_count;
	mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
	if (num_trailing_entries < page_starts_count) {
		return KERN_FAILURE;
	}

	/* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
	mach_vm_size_t trailing_size = num_trailing_entries << 1;
	if (trailing_size >> 1 != num_trailing_entries) {
		return KERN_FAILURE;
	}

	mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
	if (required_size < sizeof(*s_info)) {
		return KERN_FAILURE;
	}

	if (required_size > slide_info_size) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
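/*
 * Worked example with hypothetical counts: for page_starts_count = 1024
 * and page_extras_count = 128, num_trailing_entries is 1152, trailing_size
 * is 1152 * sizeof(uint16_t) = 2304 bytes, and required_size is
 * sizeof(*s_info) + 2304.  The caller-supplied slide_info_size must be at
 * least that large, otherwise the blob is rejected before any of the
 * trailing uint16_t arrays are dereferenced.
 */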
static kern_return_t
vm_shared_region_slide_sanity_check_v3(vm_shared_region_slide_info_entry_v3_t s_info, mach_vm_size_t slide_info_size)
{
	if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
		printf("vm_shared_region_slide_sanity_check_v3: s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE 0x%llx != 0x%llx\n", (uint64_t)s_info->page_size, (uint64_t)PAGE_SIZE_FOR_SR_SLIDE);
		return KERN_FAILURE;
	}

	uint32_t page_starts_count = s_info->page_starts_count;
	mach_vm_size_t num_trailing_entries = page_starts_count;
	mach_vm_size_t trailing_size = num_trailing_entries << 1;
	mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
	if (required_size < sizeof(*s_info)) {
		printf("vm_shared_region_slide_sanity_check_v3: required_size < sizeof(*s_info) 0x%llx < 0x%llx\n", (uint64_t)required_size, (uint64_t)sizeof(*s_info));
		return KERN_FAILURE;
	}

	if (required_size > slide_info_size) {
		printf("vm_shared_region_slide_sanity_check_v3: required_size > slide_info_size 0x%llx > 0x%llx\n", (uint64_t)required_size, (uint64_t)slide_info_size);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
static kern_return_t
vm_shared_region_slide_sanity_check_v4(vm_shared_region_slide_info_entry_v4_t s_info, mach_vm_size_t slide_info_size)
{
	if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
		return KERN_FAILURE;
	}

	/* Ensure that the slide info doesn't reference any data outside of its bounds. */

	uint32_t page_starts_count = s_info->page_starts_count;
	uint32_t page_extras_count = s_info->page_extras_count;
	mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
	if (num_trailing_entries < page_starts_count) {
		return KERN_FAILURE;
	}

	/* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
	mach_vm_size_t trailing_size = num_trailing_entries << 1;
	if (trailing_size >> 1 != num_trailing_entries) {
		return KERN_FAILURE;
	}

	mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
	if (required_size < sizeof(*s_info)) {
		return KERN_FAILURE;
	}

	if (required_size > slide_info_size) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
kern_return_t
vm_shared_region_slide_sanity_check(vm_shared_region_t sr)
{
	vm_shared_region_slide_info_t si;
	vm_shared_region_slide_info_entry_t s_info;
	kern_return_t kr;

	si = vm_shared_region_get_slide_info(sr);
	s_info = si->slide_info_entry;

	kr = mach_vm_protect(kernel_map,
			     (mach_vm_offset_t)(vm_offset_t)s_info,
			     (mach_vm_size_t) si->slide_info_size,
			     TRUE, VM_PROT_READ);
	if (kr != KERN_SUCCESS) {
		panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr);
	}

	if (s_info->version == 1) {
		kr = vm_shared_region_slide_sanity_check_v1(&s_info->v1);
	} else if (s_info->version == 2) {
		kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, si->slide_info_size);
	} else if (s_info->version == 3) {
		kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, si->slide_info_size);
	} else if (s_info->version == 4) {
		kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, si->slide_info_size);
	} else {
		goto fail;
	}

	if (kr != KERN_SUCCESS) {
		goto fail;
	}

	return KERN_SUCCESS;
fail:
	if (si->slide_info_entry != NULL) {
		kmem_free(kernel_map,
			  (vm_offset_t) si->slide_info_entry,
			  (vm_size_t) si->slide_info_size);

		vm_object_deallocate(si->slide_object);
		si->slide_object = NULL;
		si->slide_info_entry = NULL;
		si->slide_info_size = 0;
	}
	return KERN_FAILURE;
}
static kern_return_t
vm_shared_region_slide_page_v1(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
	uint16_t *toc = NULL;
	slide_info_entry_toc_t bitmap = NULL;
	uint32_t i = 0, j = 0;
	uint8_t b = 0;
	uint32_t slide = si->slide;
	int is_64 = task_has_64Bit_addr(current_task());

	vm_shared_region_slide_info_entry_v1_t s_info = &si->slide_info_entry->v1;
	toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);

	if (pageIndex >= s_info->toc_count) {
		printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count);
	} else {
		uint16_t entryIndex = (uint16_t)(toc[pageIndex]);
		slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset);

		if (entryIndex >= s_info->entry_count) {
			printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count);
		} else {
			bitmap = &slide_info_entries[entryIndex];

			for (i = 0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) {
				b = bitmap->entry[i];
				if (b != 0) {
					for (j = 0; j < 8; ++j) {
						if (b & (1 << j)) {
							uint32_t *ptr_to_slide;
							uint32_t old_value;

							ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr) + (sizeof(uint32_t) * (i * 8 + j)));
							old_value = *ptr_to_slide;
							*ptr_to_slide += slide;
							if (is_64 && *ptr_to_slide < old_value) {
								/*
								 * We just slid the low 32 bits of a 64-bit pointer
								 * and it looks like there should have been a carry-over
								 * to the upper 32 bits.
								 * The sliding failed...
								 */
								printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n",
								       i, j, b, slide, old_value, *ptr_to_slide);
								return KERN_FAILURE;
							}
						}
					}
				}
			}
		}
	}

	return KERN_SUCCESS;
}
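/*
 * The v1 format is a plain bitmap: the TOC maps each page of the slid
 * range to one of the "entry" bitmaps, and bit j of byte i in that bitmap
 * marks the 32-bit word at byte offset 4 * (i * 8 + j) within the page as
 * a pointer that needs the slide added.  Only the low 32 bits of each
 * pointer are adjusted, which is why an apparent carry into the upper half
 * on a 64-bit task is treated as a failure above.
 */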
static kern_return_t
rebase_chain_32(
	uint8_t *page_content,
	uint16_t start_offset,
	uint32_t slide_amount,
	vm_shared_region_slide_info_entry_v2_t s_info)
{
	const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

	const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
	const uint32_t value_mask = ~delta_mask;
	const uint32_t value_add = (uint32_t)(s_info->value_add);
	const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

	uint32_t page_offset = start_offset;
	uint32_t delta = 1;

	while (delta != 0 && page_offset <= last_page_offset) {
		uint8_t *loc;
		uint32_t value;

		loc = page_content + page_offset;
		memcpy(&value, loc, sizeof(value));
		delta = (value & delta_mask) >> delta_shift;
		value &= value_mask;

		if (value != 0) {
			value += value_add;
			value += slide_amount;
		}
		memcpy(loc, &value, sizeof(value));
		page_offset += delta;
	}

	/* If the offset went past the end of the page, then the slide data is invalid. */
	if (page_offset > last_page_offset) {
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
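/*
 * Worked example with hypothetical values: if delta_mask is 0x00FC0000,
 * then delta_shift = __builtin_ctzll(0x00FC0000) - 2 = 16.  A raw word of
 * 0x00840123 yields delta = (0x00840000 >> 16) = 132, i.e. the 6-bit field
 * value 33 scaled to 4-byte units, so the next chained pointer lives 132
 * bytes further into the page; the remaining bits give value = 0x123,
 * which (being non-zero) gets value_add and the slide added before being
 * written back.
 */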
static kern_return_t
rebase_chain_64(
	uint8_t *page_content,
	uint16_t start_offset,
	uint32_t slide_amount,
	vm_shared_region_slide_info_entry_v2_t s_info)
{
	const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint64_t);

	const uint64_t delta_mask = s_info->delta_mask;
	const uint64_t value_mask = ~delta_mask;
	const uint64_t value_add = s_info->value_add;
	const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;

	uint32_t page_offset = start_offset;
	uint32_t delta = 1;

	while (delta != 0 && page_offset <= last_page_offset) {
		uint8_t *loc;
		uint64_t value;

		loc = page_content + page_offset;
		memcpy(&value, loc, sizeof(value));
		delta = (uint32_t)((value & delta_mask) >> delta_shift);
		value &= value_mask;

		if (value != 0) {
			value += value_add;
			value += slide_amount;
		}
		memcpy(loc, &value, sizeof(value));
		page_offset += delta;
	}

	if (page_offset + sizeof(uint32_t) == PAGE_SIZE_FOR_SR_SLIDE) {
		/* If a pointer straddling the page boundary needs to be adjusted, then
		 * add the slide to the lower half. The encoding guarantees that the upper
		 * half on the next page will need no masking.
		 *
		 * This assumes a little-endian machine and that the region being slid
		 * never crosses a 4 GB boundary. */

		uint8_t *loc = page_content + page_offset;
		uint32_t value;

		memcpy(&value, loc, sizeof(value));
		value += slide_amount;
		memcpy(loc, &value, sizeof(value));
	} else if (page_offset > last_page_offset) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
static kern_return_t
rebase_chain(
	boolean_t is_64,
	uint32_t pageIndex,
	uint8_t *page_content,
	uint16_t start_offset,
	uint32_t slide_amount,
	vm_shared_region_slide_info_entry_v2_t s_info)
{
	kern_return_t kr;

	if (is_64) {
		kr = rebase_chain_64(page_content, start_offset, slide_amount, s_info);
	} else {
		kr = rebase_chain_32(page_content, start_offset, slide_amount, s_info);
	}

	if (kr != KERN_SUCCESS) {
		printf("vm_shared_region_slide_page() offset overflow: pageIndex=%u, start_offset=%u, slide_amount=%u\n",
		       pageIndex, start_offset, slide_amount);
	}
	return kr;
}
static kern_return_t
vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
	vm_shared_region_slide_info_entry_v2_t s_info = &si->slide_info_entry->v2;
	const uint32_t slide_amount = si->slide;

	/* The high bits of the delta_mask field are nonzero precisely when the shared
	 * cache is 64-bit. */
	const boolean_t is_64 = (s_info->delta_mask >> 32) != 0;

	const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
	const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

	uint8_t *page_content = (uint8_t *)vaddr;
	uint16_t page_entry;

	if (pageIndex >= s_info->page_starts_count) {
		printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
		       pageIndex, s_info->page_starts_count);
		return KERN_FAILURE;
	}
	page_entry = page_starts[pageIndex];

	if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
		return KERN_SUCCESS;
	}

	if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) {
		uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE;
		uint16_t info;

		do {
			uint16_t page_start_offset;
			kern_return_t kr;

			if (chain_index >= s_info->page_extras_count) {
				printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
				       chain_index, s_info->page_extras_count);
				return KERN_FAILURE;
			}
			info = page_extras[chain_index];
			page_start_offset = (info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;

			kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
			if (kr != KERN_SUCCESS) {
				return KERN_FAILURE;
			}

			chain_index++;
		} while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END));
	} else {
		const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
		kern_return_t kr;

		kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
		if (kr != KERN_SUCCESS) {
			return KERN_FAILURE;
		}
	}

	return KERN_SUCCESS;
}
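/*
 * In the v2 format, each page_starts[] entry either encodes (scaled by
 * DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT) the offset of the first pointer of a
 * single rebase chain within the page, or has
 * DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA set, in which case its low bits index
 * into page_extras[], a run of chain starts for that page that ends with
 * an entry carrying DYLD_CACHE_SLIDE_PAGE_ATTR_END.
 */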
static kern_return_t
vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vaddr, __unused mach_vm_offset_t uservaddr, uint32_t pageIndex)
{
	vm_shared_region_slide_info_entry_v3_t s_info = &si->slide_info_entry->v3;
	const uint32_t slide_amount = si->slide;

	uint8_t *page_content = (uint8_t *)vaddr;
	uint16_t page_entry;

	if (pageIndex >= s_info->page_starts_count) {
		printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
		       pageIndex, s_info->page_starts_count);
		return KERN_FAILURE;
	}
	page_entry = s_info->page_starts[pageIndex];

	if (page_entry == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE) {
		return KERN_SUCCESS;
	}

	uint8_t* rebaseLocation = page_content;
	uint64_t delta = page_entry;
	do {
		rebaseLocation += delta;
		uint64_t value;
		memcpy(&value, rebaseLocation, sizeof(value));
		delta = ((value & 0x3FF8000000000000) >> 51) * sizeof(uint64_t);

		// A pointer is one of :
		// {
		//	 uint64_t pointerValue : 51;
		//	 uint64_t offsetToNextPointer : 11;
		//	 uint64_t isBind : 1 = 0;
		//	 uint64_t authenticated : 1 = 0;
		// }
		// {
		//	 uint32_t offsetFromSharedCacheBase;
		//	 uint16_t diversityData;
		//	 uint16_t hasAddressDiversity : 1;
		//	 uint16_t hasDKey : 1;
		//	 uint16_t hasBKey : 1;
		//	 uint16_t offsetToNextPointer : 11;
		//	 uint16_t isBind : 1;
		//	 uint16_t authenticated : 1 = 1;
		// }

		bool isBind = (value & (1ULL << 62)) == 1;
		if (isBind) {
			return KERN_FAILURE;
		}

		bool isAuthenticated = (value & (1ULL << 63)) != 0;

		if (isAuthenticated) {
			// The new value for a rebase is the low 32-bits of the threaded value plus the slide.
			value = (value & 0xFFFFFFFF) + slide_amount;
			// Add in the offset from the mach_header
			const uint64_t value_add = s_info->value_add;
			value += value_add;
		} else {
			// The new value for a rebase is the low 51-bits of the threaded value plus the slide.
			// Regular pointer which needs to fit in 51-bits of value.
			// C++ RTTI uses the top bit, so we'll allow the whole top-byte
			// and the bottom 43-bits to be fit in to 51-bits.
			uint64_t top8Bits = value & 0x0007F80000000000ULL;
			uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;
			uint64_t targetValue = (top8Bits << 13) | bottom43Bits;
			value = targetValue + slide_amount;
		}

		memcpy(rebaseLocation, &value, sizeof(value));
	} while (delta != 0);

	return KERN_SUCCESS;
}
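/*
 * Example of one step of the v3 chain walk, using a hypothetical raw
 * value of 0x0008000000001000 (bits 62 and 63 clear, so a plain rebase):
 * the 11-bit offset-to-next field occupies bits 51..61, so
 * delta = ((value & 0x3FF8000000000000) >> 51) * 8 = 8 bytes; the target
 * is (top8Bits << 13) | bottom43Bits = 0x1000, and the word is rewritten
 * as 0x1000 + slide_amount before the walk advances by delta.
 */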
static kern_return_t
rebase_chainv4(
	uint8_t *page_content,
	uint16_t start_offset,
	uint32_t slide_amount,
	vm_shared_region_slide_info_entry_v4_t s_info)
{
	const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);

	const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
	const uint32_t value_mask = ~delta_mask;
	const uint32_t value_add = (uint32_t)(s_info->value_add);
	const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;

	uint32_t page_offset = start_offset;
	uint32_t delta = 1;

	while (delta != 0 && page_offset <= last_page_offset) {
		uint8_t *loc;
		uint32_t value;

		loc = page_content + page_offset;
		memcpy(&value, loc, sizeof(value));
		delta = (value & delta_mask) >> delta_shift;
		value &= value_mask;

		if ((value & 0xFFFF8000) == 0) {
			// small positive non-pointer, use as-is
		} else if ((value & 0x3FFF8000) == 0x3FFF8000) {
			// small negative non-pointer
			value |= 0xC0000000;
		} else {
			// pointer that needs rebasing
			value += value_add;
			value += slide_amount;
		}
		memcpy(loc, &value, sizeof(value));
		page_offset += delta;
	}

	/* If the offset went past the end of the page, then the slide data is invalid. */
	if (page_offset > last_page_offset) {
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
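/*
 * The v4 encoding (it appears to be used for 32-bit shared caches) packs
 * three cases into each chained word after the delta bits are masked off:
 * a value with bits 15..31 clear is a small positive non-pointer and is
 * left alone; a value with bits 15..29 all set is a small negative
 * non-pointer and is sign-extended by OR-ing in 0xC0000000; anything else
 * is a pointer and gets value_add plus the slide.
 */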
static kern_return_t
vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
	vm_shared_region_slide_info_entry_v4_t s_info = &si->slide_info_entry->v4;
	const uint32_t slide_amount = si->slide;

	const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
	const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);

	uint8_t *page_content = (uint8_t *)vaddr;
	uint16_t page_entry;

	if (pageIndex >= s_info->page_starts_count) {
		printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
		       pageIndex, s_info->page_starts_count);
		return KERN_FAILURE;
	}
	page_entry = page_starts[pageIndex];

	if (page_entry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE) {
		return KERN_SUCCESS;
	}

	if (page_entry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) {
		uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE4_PAGE_INDEX;
		uint16_t info;

		do {
			uint16_t page_start_offset;
			kern_return_t kr;

			if (chain_index >= s_info->page_extras_count) {
				printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
				       chain_index, s_info->page_extras_count);
				return KERN_FAILURE;
			}
			info = page_extras[chain_index];
			page_start_offset = (info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;

			kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
			if (kr != KERN_SUCCESS) {
				return KERN_FAILURE;
			}

			chain_index++;
		} while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END));
	} else {
		const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
		kern_return_t kr;

		kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
		if (kr != KERN_SUCCESS) {
			return KERN_FAILURE;
		}
	}

	return KERN_SUCCESS;
}
kern_return_t
vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, mach_vm_offset_t uservaddr, uint32_t pageIndex)
{
	if (si->slide_info_entry->version == 1) {
		return vm_shared_region_slide_page_v1(si, vaddr, pageIndex);
	} else if (si->slide_info_entry->version == 2) {
		return vm_shared_region_slide_page_v2(si, vaddr, pageIndex);
	} else if (si->slide_info_entry->version == 3) {
		return vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex);
	} else if (si->slide_info_entry->version == 4) {
		return vm_shared_region_slide_page_v4(si, vaddr, pageIndex);
	} else {
		return KERN_FAILURE;
	}
}
/******************************************************************************/
/* Comm page support                                                          */
/******************************************************************************/
ipc_port_t commpage32_handle = IPC_PORT_NULL;
ipc_port_t commpage64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage32_entry = NULL;
vm_named_entry_t commpage64_entry = NULL;
vm_map_t commpage32_map = VM_MAP_NULL;
vm_map_t commpage64_map = VM_MAP_NULL;

ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage_text32_entry = NULL;
vm_named_entry_t commpage_text64_entry = NULL;
vm_map_t commpage_text32_map = VM_MAP_NULL;
vm_map_t commpage_text64_map = VM_MAP_NULL;

user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START;
user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START;
#if defined(__i386__) || defined(__x86_64__)
/*
 * Create a memory entry, VM submap and pmap for one commpage.
 */
static void
_vm_commpage_init(
	ipc_port_t	*handlep,
	vm_map_size_t	size)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_map_t		new_map;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> _init(0x%llx)\n",
		 (long long)size));

	kr = mach_memory_entry_allocate(&mem_entry,
					handlep);
	if (kr != KERN_SUCCESS) {
		panic("_vm_commpage_init: could not allocate mem_entry");
	}
	new_map = vm_map_create(pmap_create(NULL, 0, 0), 0, size, TRUE);
	if (new_map == VM_MAP_NULL) {
		panic("_vm_commpage_init: could not allocate VM map");
	}
	mem_entry->backing.map = new_map;
	mem_entry->internal = TRUE;
	mem_entry->is_sub_map = TRUE;
	mem_entry->offset = 0;
	mem_entry->protection = VM_PROT_ALL;
	mem_entry->size = size;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: _init(0x%llx) <- %p\n",
		 (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep)));
}
#endif /* __i386__ || __x86_64__ */
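/*
 * Each commpage handle created above is a named entry whose backing is a
 * dedicated submap with its own pmap (pmap_create()), so
 * vm_commpage_enter() can later map that same submap into every task's
 * address space at exec() time.
 */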
/*
 * Initialize the comm text pages at boot time.
 */
extern u_int32_t random(void);
void
vm_commpage_text_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: ->init()\n"));
#if defined(__i386__) || defined(__x86_64__)
	/* create the 32-bit comm text page */
	unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restricting to 32bMAX-2PAGE */
	_vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject;
	commpage_text32_map = commpage_text32_entry->backing.map;
	commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm text page */
	offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding up to a 2MB range */
	_vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject;
	commpage_text64_map = commpage_text64_entry->backing.map;
	commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);

	commpage_text_populate();
#elif defined(__arm64__) || defined(__arm__)
#else
#error Unknown architecture.
#endif /* __i386__ || __x86_64__ */
	/* populate the routines in here */
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: init() <-\n"));
}
/*
 * Initialize the comm pages at boot time.
 */
void
vm_commpage_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> init()\n"));

#if defined(__i386__) || defined(__x86_64__)
	/* create the 32-bit comm page */
	_vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
	commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
	commpage32_map = commpage32_entry->backing.map;

	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm page */
	_vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
	commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
	commpage64_map = commpage64_entry->backing.map;

#endif /* __i386__ || __x86_64__ */

	/* populate them according to this specific platform */
	commpage_populate();
	__commpage_setup = 1;
#if defined(__i386__) || defined(__x86_64__)
	if (__system_power_source == 0) {
		post_sys_powersource_internal(0, 1);
	}
#endif /* __i386__ || __x86_64__ */

	SHARED_REGION_TRACE_DEBUG(
		("commpage: init() <-\n"));
}
/*
 * Enter the appropriate comm page into the task's address space.
 * This is called at exec() time via vm_map_exec().
 */
kern_return_t
vm_commpage_enter(
	vm_map_t	map,
	task_t		task,
	boolean_t	is64bit)
{
#if defined(__arm__)
#pragma unused(is64bit)
	(void)task;
	(void)map;
	return KERN_SUCCESS;
#elif defined(__arm64__)
#pragma unused(is64bit)
	(void)task;
	(void)map;
	pmap_insert_sharedpage(vm_map_pmap(map));
	return KERN_SUCCESS;
#else
	ipc_port_t		commpage_handle, commpage_text_handle;
	vm_map_offset_t		commpage_address, objc_address, commpage_text_address;
	vm_map_size_t		commpage_size, objc_size, commpage_text_size;
	int			vm_flags;
	vm_map_kernel_flags_t	vmk_flags;
	kern_return_t		kr;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> enter(%p,%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task)));

	commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
	/* the comm page is likely to be beyond the actual end of the VM map */
	vm_flags = VM_FLAGS_FIXED;
	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_beyond_max = TRUE;

	/* select the appropriate comm page for this task */
	assert(!(is64bit ^ vm_map_is_64bit(map)));
	if (is64bit) {
		commpage_handle = commpage64_handle;
		commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
		commpage_size = _COMM_PAGE64_AREA_LENGTH;
		objc_size = _COMM_PAGE64_OBJC_SIZE;
		objc_address = _COMM_PAGE64_OBJC_BASE;
		commpage_text_handle = commpage_text64_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text64_location;
	} else {
		commpage_handle = commpage32_handle;
		commpage_address =
			(vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
		commpage_size = _COMM_PAGE32_AREA_LENGTH;
		objc_size = _COMM_PAGE32_OBJC_SIZE;
		objc_address = _COMM_PAGE32_OBJC_BASE;
		commpage_text_handle = commpage_text32_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text32_location;
	}

	vm_tag_t tag = VM_KERN_MEMORY_NONE;
	if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
	    (commpage_size & (pmap_nesting_size_min - 1)) == 0) {
		/* the commpage is properly aligned or sized for pmap-nesting */
		tag = VM_MEMORY_SHARED_PMAP;
	}
	/* map the comm page in the task's address space */
	assert(commpage_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_address,
		commpage_size,
		0,
		vm_flags,
		vmk_flags,
		tag,
		commpage_handle,
		0,
		FALSE,
		VM_PROT_READ,
		VM_PROT_READ,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage: enter(%p,0x%llx,0x%llx) "
			 "commpage %p mapping failed 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (long long)commpage_address,
			 (long long)commpage_size,
			 (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr));
	}

	/* map the comm text page in the task's address space */
	assert(commpage_text_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_text_address,
		commpage_text_size,
		0,
		vm_flags,
		vmk_flags,
		tag,
		commpage_text_handle,
		0,
		FALSE,
		VM_PROT_READ | VM_PROT_EXECUTE,
		VM_PROT_READ | VM_PROT_EXECUTE,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage text: enter(%p,0x%llx,0x%llx) "
			 "commpage text %p mapping failed 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (long long)commpage_text_address,
			 (long long)commpage_text_size,
			 (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr));
	}

	/*
	 * Since we're here, we also pre-allocate some virtual space for the
	 * Objective-C run-time, if needed...
	 */
	if (objc_size != 0) {
		kr = vm_map_enter_mem_object(
			map,
			&objc_address,
			objc_size,
			0,
			VM_FLAGS_FIXED,
			vmk_flags,
			tag,
			IPC_PORT_NULL,
			0,
			FALSE,
			VM_PROT_ALL,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("commpage: enter(%p,0x%llx,0x%llx) "
				 "objc mapping failed 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (long long)objc_address,
				 (long long)objc_size, kr));
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("commpage: enter(%p,%p) <- 0x%x\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task), kr));
	return kr;
#endif
}
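/*
 * Note on the tag selection above: when the commpage address and size are
 * both multiples of pmap_nesting_size_min, the mapping is tagged
 * VM_MEMORY_SHARED_PMAP, presumably so the commpage submap's translations
 * can be nested into each task's pmap instead of being replicated per
 * process.
 */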
kern_return_t
vm_shared_region_slide(uint32_t slide,
	mach_vm_offset_t	entry_start_address,
	mach_vm_size_t		entry_size,
	mach_vm_offset_t	slide_start,
	mach_vm_size_t		slide_size,
	mach_vm_offset_t	slid_mapping,
	memory_object_control_t	sr_file_control)
{
	void			*slide_info_entry = NULL;
	int			error;
	vm_shared_region_t	sr;

	SHARED_REGION_TRACE_DEBUG(
		("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
		 slide, entry_start_address, entry_size, slide_start, slide_size));

	sr = vm_shared_region_get(current_task());
	if (sr == NULL) {
		printf("%s: no shared region?\n", __FUNCTION__);
		SHARED_REGION_TRACE_DEBUG(
			("vm_shared_region_slide: <- %d (no shared region)\n",
			 KERN_FAILURE));
		return KERN_FAILURE;
	}

	/*
	 * Protect from concurrent access.
	 */
	vm_shared_region_lock();
	while (sr->sr_slide_in_progress) {
		vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
	}
	if (sr->sr_slid
#ifndef CONFIG_EMBEDDED
	    || shared_region_completed_slide
#endif
	    ) {
		vm_shared_region_unlock();

		vm_shared_region_deallocate(sr);
		printf("%s: shared region already slid?\n", __FUNCTION__);
		SHARED_REGION_TRACE_DEBUG(
			("vm_shared_region_slide: <- %d (already slid)\n",
			 KERN_FAILURE));
		return KERN_FAILURE;
	}

	sr->sr_slide_in_progress = TRUE;
	vm_shared_region_unlock();

	error = vm_shared_region_slide_mapping(sr,
					       slide_size,
					       entry_start_address,
					       entry_size,
					       slid_mapping,
					       slide,
					       sr_file_control);
	if (error) {
		printf("slide_info initialization failed with kr=%d\n", error);
		goto done;
	}

	slide_info_entry = vm_shared_region_get_slide_info_entry(sr);
	if (slide_info_entry == NULL) {
		error = KERN_FAILURE;
	} else {
		error = copyin((user_addr_t)slide_start,
			       slide_info_entry,
			       (vm_size_t)slide_size);
		if (error) {
			error = KERN_INVALID_ADDRESS;
		}
	}
	if (error) {
		goto done;
	}

	if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) {
		error = KERN_INVALID_ARGUMENT;
		printf("Sanity Check failed for slide_info\n");
	} else {
		printf("Successfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n",
		       (void*)(uintptr_t)entry_start_address,
		       (unsigned long)entry_size,
		       (unsigned long)slide_size);
	}

done:
	vm_shared_region_lock();

	assert(sr->sr_slide_in_progress);
	assert(sr->sr_slid == FALSE);
	sr->sr_slide_in_progress = FALSE;
	thread_wakeup(&sr->sr_slide_in_progress);

	if (error == KERN_SUCCESS) {
		sr->sr_slid = TRUE;

		/*
		 * We don't know how to tear down a slid shared region today, because
		 * we would have to invalidate all the pages that have been slid
		 * atomically with respect to anyone mapping the shared region afresh.
		 * Therefore, take a dangling reference to prevent teardown.
		 */
		sr->sr_ref_count++;
#ifndef CONFIG_EMBEDDED
		shared_region_completed_slide = TRUE;
#endif
	}
	vm_shared_region_unlock();

	vm_shared_region_deallocate(sr);

	SHARED_REGION_TRACE_DEBUG(
		("vm_shared_region_slide: <- %d\n",
		 error));

	return error;
}
/*
 * This is called from power-management code to let the kernel know the
 * current source of power:
 *   0 if it is an external source (connected to AC power)
 *   1 if it is an internal power source, i.e. the battery
 */
void
#if defined(__i386__) || defined(__x86_64__)
post_sys_powersource(int i)
#else
post_sys_powersource(__unused int i)
#endif
{
#if defined(__i386__) || defined(__x86_64__)
	post_sys_powersource_internal(i, 0);
#endif /* __i386__ || __x86_64__ */
}


#if defined(__i386__) || defined(__x86_64__)
static void
post_sys_powersource_internal(int i, int internal)
{
	if (internal == 0) {
		__system_power_source = i;
	}

	if (__commpage_setup != 0) {
		if (__system_power_source != 0) {
			commpage_set_spin_count(0);
		} else {
			commpage_set_spin_count(MP_SPIN_TRIES);
		}
	}
}
#endif /* __i386__ || __x86_64__ */