/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Shared region (... and comm page)
 *
 * This file handles the VM shared region and comm page.
 *
 * SHARED REGION:
 *
 * A shared region is a submap that contains the most common system shared
 * libraries for a given environment.
 * An environment is defined by (cpu-type, 64-bitness, root directory).
 *
 * The point of a shared region is to reduce the setup overhead when exec'ing
 * a new process.
 * A shared region uses a shared VM submap that gets mapped automatically
 * at exec() time (see vm_map_exec()).  The first process of a given
 * environment sets up the shared region and all further processes in that
 * environment can re-use that shared region without having to re-create
 * the same mappings in their VM map.  All they need is contained in the shared
 * region.
 * It can also share a pmap (mostly for read-only parts but also for the
 * initial version of some writable parts), which gets "nested" into the
 * process's pmap.  This reduces the number of soft faults:  once one process
 * brings in a page in the shared region, all the other processes can access
 * it without having to enter it in their own pmap.
 *
 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
 * to map the appropriate shared region in the process's address space.
 * We look up the appropriate shared region for the process's environment.
 * If we can't find one, we create a new (empty) one and add it to the list.
 * Otherwise, we just take an extra reference on the shared region we found.
 *
 * The "dyld" runtime (mapped into the process's address space at exec() time)
 * will then use the shared_region_check_np() and shared_region_map_np()
 * system calls to validate and/or populate the shared region with the
 * appropriate dyld_shared_cache file.
 *
 * The shared region is inherited on fork() and the child simply takes an
 * extra reference on its parent's shared region.
 *
 * When the task terminates, we release a reference on its shared region.
 * When the last reference is released, we destroy the shared region.
 *
 * After a chroot(), the calling process keeps using its original shared region,
 * since that's what was mapped when it was started.  But its children
 * will use a different shared region, because they need to use the shared
 * cache that's relative to the new root directory.
 */
/*
 * COMM PAGE
 *
 * A "comm page" is an area of memory that is populated by the kernel with
 * the appropriate platform-specific version of some commonly used code.
 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
 * for the native cpu-type.  No need to overly optimize translated code
 * for hardware that is not really there!
 *
 * The comm pages are created and populated at boot time.
 *
 * The appropriate comm page is mapped into a process's address space
 * at exec() time, in vm_map_exec().
 * It is then inherited on fork().
 *
 * The comm page is shared between the kernel and all applications of
 * a given platform.  Only the kernel can modify it.
 *
 * Applications just branch to fixed addresses in the comm page and find
 * the right version of the code for the platform.  There is also some
 * data provided and updated by the kernel for processes to retrieve easily
 * without having to do a system call.
 */
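/*
 * Illustrative sketch (not part of the original sources): the typical
 * lifecycle of a shared region, as seen from this file.  The call flow
 * below is a simplified summary of what vm_map_exec() and dyld trigger;
 * exact arguments are omitted.
 *
 *	exec():
 *		vm_map_exec()
 *		  -> vm_shared_region_enter(map, task, fsroot, cpu)
 *			-> vm_shared_region_lookup()     // find or create
 *			-> vm_shared_region_set(task, sr)
 *			-> vm_map_enter_mem_object()     // map the submap
 *	dyld (in user space):
 *		shared_region_check_np() / shared_region_map_np()
 *		  -> vm_shared_region_map_file()         // populate with the shared cache
 *	exit():
 *		vm_shared_region_deallocate()            // may arm a delayed destroy
 */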
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>

#include <vm/vm_protos.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>
/* "dyld" uses this to figure out what the kernel supports */
int shared_region_version = 3;

/* trace level, output is sent to the system log file */
int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;

/* should local (non-chroot) shared regions persist when no task uses them ? */
int shared_region_persistence = 0;	/* no by default */

/* delay before reclaiming an unused shared region */
int shared_region_destroy_delay = 120;	/* in seconds */

/*
 * Only one cache gets to slide on Desktop, since we can't
 * tear down slide info properly today and the desktop actually
 * produces lots of shared caches.
 */
boolean_t shared_region_completed_slide = FALSE;

/* this lock protects all the shared region data structures */
lck_grp_t *vm_shared_region_lck_grp;
lck_mtx_t vm_shared_region_lock;

#define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
#define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
#define vm_shared_region_sleep(event, interruptible)		\
	lck_mtx_sleep(&vm_shared_region_lock,			\
		      LCK_SLEEP_DEFAULT,			\
		      (event_t) (event),			\
		      (interruptible))

/* the list of currently available shared regions (one per environment) */
queue_head_t	vm_shared_region_queue;
static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
static vm_shared_region_t vm_shared_region_create(
	void			*root_dir,
	cpu_type_t		cputype,
	boolean_t		is_64bit);
static void vm_shared_region_destroy(vm_shared_region_t shared_region);

static void vm_shared_region_timeout(thread_call_param_t param0,
				     thread_call_param_t param1);

static int __commpage_setup = 0;
#if defined(__i386__) || defined(__x86_64__)
static int __system_power_source = 1;	/* init to external power source */
static void post_sys_powersource_internal(int i, int internal);
#endif /* __i386__ || __x86_64__ */
/*
 * Initialize the module...
 */
void
vm_shared_region_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> init\n"));

	vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region",
						      LCK_GRP_ATTR_NULL);
	lck_mtx_init(&vm_shared_region_lock,
		     vm_shared_region_lck_grp,
		     LCK_ATTR_NULL);

	queue_init(&vm_shared_region_queue);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: <- init\n"));
}
/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 */
vm_shared_region_t
vm_shared_region_get(
	task_t		task)
{
	vm_shared_region_t	shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> get(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(task)));

	task_lock(task);
	vm_shared_region_lock();
	shared_region = task->shared_region;
	if (shared_region) {
		assert(shared_region->sr_ref_count > 0);
		vm_shared_region_reference_locked(shared_region);
	}
	vm_shared_region_unlock();
	task_unlock(task);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: get(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	return shared_region;
}
/*
 * Get the base address of the shared region.
 * That's the address at which it needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_offset_t
vm_shared_region_base_address(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> base_address(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: base_address(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_base_address));
	return shared_region->sr_base_address;
}
/*
 * Get the size of the shared region.
 * That's the size that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_size_t
vm_shared_region_size(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> size(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: size(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_size));
	return shared_region->sr_size;
}
/*
 * Get the memory entry of the shared region.
 * That's the "memory object" that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that.  The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
ipc_port_t
vm_shared_region_mem_entry(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> mem_entry(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: mem_entry(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry)));
	return shared_region->sr_mem_entry;
}
uint32_t
vm_shared_region_get_slide(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> vm_shared_region_get_slide(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: vm_shared_region_get_slide(%p) <- %u\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_slide_info.slide));

	/* 0 if we haven't slid */
	assert(shared_region->sr_slide_info.slide_object != NULL ||
	       shared_region->sr_slide_info.slide == 0);

	return shared_region->sr_slide_info.slide;
}
vm_shared_region_slide_info_t
vm_shared_region_get_slide_info(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> vm_shared_region_get_slide_info(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info)));
	return &shared_region->sr_slide_info;
}
/*
 * Set the shared region the process should use.
 * A NULL new shared region means that we just want to release the old
 * shared region.
 * The caller should already have an extra reference on the new shared region
 * (if any).  We release a reference on the old shared region (if any).
 */
void
vm_shared_region_set(
	task_t			task,
	vm_shared_region_t	new_shared_region)
{
	vm_shared_region_t	old_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> set(%p, %p)\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(new_shared_region)));

	task_lock(task);
	vm_shared_region_lock();

	old_shared_region = task->shared_region;
	if (new_shared_region) {
		assert(new_shared_region->sr_ref_count > 0);
	}

	task->shared_region = new_shared_region;

	vm_shared_region_unlock();
	task_unlock(task);

	if (old_shared_region) {
		assert(old_shared_region->sr_ref_count > 0);
		vm_shared_region_deallocate(old_shared_region);
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: set(%p) <- old=%p new=%p\n",
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(old_shared_region),
		 (void *)VM_KERNEL_ADDRPERM(new_shared_region)));
}
/*
 * Look up the shared region for the desired environment.
 * If none is found, create a new (empty) one.
 * Grab an extra reference on the returned shared region, to make sure
 * it doesn't get destroyed before the caller is done with it.  The caller
 * is responsible for consuming that extra reference if necessary.
 */
vm_shared_region_t
vm_shared_region_lookup(
	void		*root_dir,
	cpu_type_t	cputype,
	boolean_t	is_64bit)
{
	vm_shared_region_t	shared_region;
	vm_shared_region_t	new_shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> lookup(root=%p,cpu=%d,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir), cputype, is_64bit));

	shared_region = NULL;
	new_shared_region = NULL;

	vm_shared_region_lock();
	for (;;) {
		queue_iterate(&vm_shared_region_queue,
			      shared_region,
			      vm_shared_region_t,
			      sr_q) {
			assert(shared_region->sr_ref_count > 0);
			if (shared_region->sr_cpu_type == cputype &&
			    shared_region->sr_root_dir == root_dir &&
			    shared_region->sr_64bit == is_64bit) {
				/* found a match ! */
				vm_shared_region_reference_locked(shared_region);
				goto done;
			}
		}
		if (new_shared_region == NULL) {
			/* no match: create a new one */
			vm_shared_region_unlock();
			new_shared_region = vm_shared_region_create(root_dir,
								    cputype,
								    is_64bit);
			/* do the lookup again, in case we lost a race */
			vm_shared_region_lock();
		} else {
			/* still no match: use our new one */
			shared_region = new_shared_region;
			new_shared_region = NULL;
			queue_enter(&vm_shared_region_queue,
				    shared_region,
				    vm_shared_region_t,
				    sr_q);
			break;
		}
	}

done:
	vm_shared_region_unlock();

	if (new_shared_region) {
		/*
		 * We lost a race with someone else to create a new shared
		 * region for that environment.  Get rid of our unused one.
		 */
		assert(new_shared_region->sr_ref_count == 1);
		new_shared_region->sr_ref_count--;
		vm_shared_region_destroy(new_shared_region);
		new_shared_region = NULL;
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: lookup(root=%p,cpu=%d,64bit=%d) <- %p\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir),
		 cputype, is_64bit,
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	assert(shared_region->sr_ref_count > 0);
	return shared_region;
}
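/*
 * Illustrative note (not from the original sources): the lookup above uses
 * an optimistic "create outside the lock, then re-check" pattern.  Two
 * tasks exec'ing the same environment concurrently may both build a
 * candidate region; only the one that re-acquires the lock and still finds
 * no match enqueues its region, while the loser destroys its unused copy
 * (the "lost a race" path above).
 */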
/*
 * Take an extra reference on a shared region.
 * The vm_shared_region_lock should already be held by the caller.
 */
static void
vm_shared_region_reference_locked(
	vm_shared_region_t	shared_region)
{
	lck_mtx_assert(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> reference_locked(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count++;

	if (shared_region->sr_timer_call != NULL) {
		boolean_t cancelled;

		/* cancel and free any pending timeout */
		cancelled = thread_call_cancel(shared_region->sr_timer_call);
		if (cancelled) {
			thread_call_free(shared_region->sr_timer_call);
			shared_region->sr_timer_call = NULL;
			/* release the reference held by the cancelled timer */
			shared_region->sr_ref_count--;
		} else {
			/* the timer will drop the reference and free itself */
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: reference_locked(%p) <- %d\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_ref_count));
}
/*
 * Release a reference on the shared region.
 * Destroy it if there are no references left.
 */
void
vm_shared_region_deallocate(
	vm_shared_region_t	shared_region)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> deallocate(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));

	vm_shared_region_lock();

	assert(shared_region->sr_ref_count > 0);

	if (shared_region->sr_root_dir == NULL) {
		/*
		 * Local (i.e. based on the boot volume) shared regions
		 * can persist or not based on the "shared_region_persistence"
		 * sysctl.
		 * Make sure that this one complies.
		 *
		 * See comments in vm_shared_region_slide() for notes about
		 * shared regions we have slid (which are not torn down currently).
		 */
		if (shared_region_persistence &&
		    !shared_region->sr_persists) {
			/* make this one persistent */
			shared_region->sr_ref_count++;
			shared_region->sr_persists = TRUE;
		} else if (!shared_region_persistence &&
			   shared_region->sr_persists) {
			/* make this one no longer persistent */
			assert(shared_region->sr_ref_count > 1);
			shared_region->sr_ref_count--;
			shared_region->sr_persists = FALSE;
		}
	}

	assert(shared_region->sr_ref_count > 0);
	shared_region->sr_ref_count--;
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p): ref now %d\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 shared_region->sr_ref_count));

	if (shared_region->sr_ref_count == 0) {
		uint64_t deadline;

		assert(!shared_region->sr_slid);

		if (shared_region->sr_timer_call == NULL) {
			/* hold one reference for the timer */
			assert(!shared_region->sr_mapping_in_progress);
			shared_region->sr_ref_count++;

			/* set up the timer */
			shared_region->sr_timer_call = thread_call_allocate(
				(thread_call_func_t) vm_shared_region_timeout,
				(thread_call_param_t) shared_region);

			/* schedule the timer */
			clock_interval_to_deadline(shared_region_destroy_delay,
						   1000 * 1000 * 1000,
						   &deadline);
			thread_call_enter_delayed(shared_region->sr_timer_call,
						  deadline);

			SHARED_REGION_TRACE_DEBUG(
				("shared_region: deallocate(%p): armed timer\n",
				 (void *)VM_KERNEL_ADDRPERM(shared_region)));

			vm_shared_region_unlock();
		} else {
			/* timer expired: let go of this shared region */

			/*
			 * We can't properly handle teardown of a slid object today.
			 */
			assert(!shared_region->sr_slid);

			/*
			 * Remove it from the queue first, so no one can find
			 * it...
			 */
			queue_remove(&vm_shared_region_queue,
				     shared_region,
				     vm_shared_region_t,
				     sr_q);
			vm_shared_region_unlock();

			/* ... and destroy it */
			vm_shared_region_destroy(shared_region);
			shared_region = NULL;
		}
	} else {
		vm_shared_region_unlock();
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: deallocate(%p) <-\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
}
static void
vm_shared_region_timeout(
	thread_call_param_t	param0,
	__unused thread_call_param_t	param1)
{
	vm_shared_region_t	shared_region;

	shared_region = (vm_shared_region_t) param0;

	vm_shared_region_deallocate(shared_region);
}
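/*
 * Note (illustrative, not from the original sources): the delayed-destroy
 * scheme above relies on a simple reference-count hand-off.  When the last
 * "real" reference goes away, deallocate() re-adds one reference on behalf
 * of the timer it arms; whoever ends the timer's life consumes it:
 *
 *	ref reaches 0	-> sr_ref_count++; arm timer      (deallocate)
 *	region reused before timeout
 *			-> thread_call_cancel(); sr_ref_count--
 *						      (reference_locked)
 *	timeout fires	-> vm_shared_region_deallocate()  (the timer drops its
 *			   reference, possibly destroying the region)
 */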
/*
 * Create a new (empty) shared region for a new environment.
 */
static vm_shared_region_t
vm_shared_region_create(
	void			*root_dir,
	cpu_type_t		cputype,
	boolean_t		is_64bit)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	ipc_port_t		mem_entry_port;
	vm_shared_region_t	shared_region;
	vm_shared_region_slide_info_t si;
	vm_map_t		sub_map;
	mach_vm_offset_t	base_address, pmap_nesting_start;
	mach_vm_size_t		size, pmap_nesting_size;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> create(root=%p,cpu=%d,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(root_dir), cputype, is_64bit));

	base_address = 0;
	size = 0;
	mem_entry = NULL;
	mem_entry_port = IPC_PORT_NULL;
	sub_map = VM_MAP_NULL;

	/* create a new shared region structure... */
	shared_region = kalloc(sizeof (*shared_region));
	if (shared_region == NULL) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: couldn't allocate\n"));
		goto done;
	}

	/* figure out the correct settings for the desired environment */
	if (is_64bit) {
		switch (cputype) {
		case CPU_TYPE_I386:
			base_address = SHARED_REGION_BASE_X86_64;
			size = SHARED_REGION_SIZE_X86_64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC64;
			size = SHARED_REGION_SIZE_PPC64;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
			break;
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				 cputype));
			kfree(shared_region, sizeof (*shared_region));
			shared_region = NULL;
			goto done;
		}
	} else {
		switch (cputype) {
		case CPU_TYPE_I386:
			base_address = SHARED_REGION_BASE_I386;
			size = SHARED_REGION_SIZE_I386;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
			break;
		case CPU_TYPE_POWERPC:
			base_address = SHARED_REGION_BASE_PPC;
			size = SHARED_REGION_SIZE_PPC;
			pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
			pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
			break;
		default:
			SHARED_REGION_TRACE_ERROR(
				("shared_region: create: unknown cpu type %d\n",
				 cputype));
			kfree(shared_region, sizeof (*shared_region));
			shared_region = NULL;
			goto done;
		}
	}

	/* create a memory entry structure and a Mach port handle */
	kr = mach_memory_entry_allocate(&mem_entry,
					&mem_entry_port);
	if (kr != KERN_SUCCESS) {
		kfree(shared_region, sizeof (*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: "
			 "couldn't allocate mem_entry\n"));
		goto done;
	}

	/* create a VM sub map and its pmap */
	sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit),
				0, size,
				TRUE);
	if (sub_map == VM_MAP_NULL) {
		ipc_port_release_send(mem_entry_port);
		kfree(shared_region, sizeof (*shared_region));
		shared_region = NULL;
		SHARED_REGION_TRACE_ERROR(
			("shared_region: create: "
			 "couldn't allocate map\n"));
		goto done;
	}

	/* make the memory entry point to the VM sub map */
	mem_entry->is_sub_map = TRUE;
	mem_entry->backing.map = sub_map;
	mem_entry->size = size;
	mem_entry->protection = VM_PROT_ALL;

	/* make the shared region point at the memory entry */
	shared_region->sr_mem_entry = mem_entry_port;

	/* fill in the shared region's environment and settings */
	shared_region->sr_base_address = base_address;
	shared_region->sr_size = size;
	shared_region->sr_pmap_nesting_start = pmap_nesting_start;
	shared_region->sr_pmap_nesting_size = pmap_nesting_size;
	shared_region->sr_cpu_type = cputype;
	shared_region->sr_64bit = is_64bit;
	shared_region->sr_root_dir = root_dir;

	queue_init(&shared_region->sr_q);
	shared_region->sr_mapping_in_progress = FALSE;
	shared_region->sr_slide_in_progress = FALSE;
	shared_region->sr_persists = FALSE;
	shared_region->sr_slid = FALSE;
	shared_region->sr_timer_call = NULL;
	shared_region->sr_first_mapping = (mach_vm_offset_t) -1;

	/* grab a reference for the caller */
	shared_region->sr_ref_count = 1;

	/* And set up slide info */
	si = &shared_region->sr_slide_info;
	si->start = 0;
	si->end = 0;
	si->slide = 0;
	si->slide_object = NULL;
	si->slide_info_size = 0;
	si->slide_info_entry = NULL;

done:
	if (shared_region) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=%d,64bit=%d,"
			 "base=0x%llx,size=0x%llx) <- "
			 "%p mem=(%p,%p) map=%p pmap=%p\n",
			 (void *)VM_KERNEL_ADDRPERM(root_dir),
			 cputype, is_64bit, (long long)base_address,
			 (long long)size,
			 (void *)VM_KERNEL_ADDRPERM(shared_region),
			 (void *)VM_KERNEL_ADDRPERM(mem_entry_port),
			 (void *)VM_KERNEL_ADDRPERM(mem_entry),
			 (void *)VM_KERNEL_ADDRPERM(sub_map),
			 (void *)VM_KERNEL_ADDRPERM(sub_map->pmap)));
	} else {
		SHARED_REGION_TRACE_INFO(
			("shared_region: create(root=%p,cpu=%d,64bit=%d,"
			 "base=0x%llx,size=0x%llx) <- NULL",
			 (void *)VM_KERNEL_ADDRPERM(root_dir),
			 cputype, is_64bit, (long long)base_address,
			 (long long)size));
	}
	return shared_region;
}
/*
 * Destroy a now-unused shared region.
 * The shared region is no longer in the queue and can not be looked up.
 */
static void
vm_shared_region_destroy(
	vm_shared_region_t	shared_region)
{
	vm_named_entry_t	mem_entry;
	vm_map_t		map;

	SHARED_REGION_TRACE_INFO(
		("shared_region: -> destroy(%p) (root=%p,cpu=%d,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir),
		 shared_region->sr_cpu_type,
		 shared_region->sr_64bit));

	assert(shared_region->sr_ref_count == 0);
	assert(!shared_region->sr_persists);
	assert(!shared_region->sr_slid);

	mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
	assert(mem_entry->is_sub_map);
	assert(!mem_entry->internal);
	assert(!mem_entry->is_pager);
	assert(!mem_entry->is_copy);
	map = mem_entry->backing.map;

	/*
	 * Clean up the pmap first.  The virtual addresses that were
	 * entered in this possibly "nested" pmap may have different values
	 * than the VM map's min and max offsets, if the VM sub map was
	 * mapped at a non-zero offset in the processes' main VM maps, which
	 * is usually the case, so the clean-up we do in vm_map_destroy() would
	 * not be enough.
	 */
	pmap_remove(map->pmap,
		    shared_region->sr_base_address,
		    (shared_region->sr_base_address +
		     shared_region->sr_size));

	/*
	 * Release our (one and only) handle on the memory entry.
	 * This will generate a no-senders notification, which will be processed
	 * by ipc_kobject_notify(), which will release the one and only
	 * reference on the memory entry and cause it to be destroyed, along
	 * with the VM sub map and its pmap.
	 */
	mach_memory_entry_port_release(shared_region->sr_mem_entry);
	mem_entry = NULL;
	shared_region->sr_mem_entry = IPC_PORT_NULL;

	if (shared_region->sr_timer_call) {
		thread_call_free(shared_region->sr_timer_call);
	}

#if 0
	/*
	 * If slid, free those resources.  We'll want this eventually,
	 * but can't handle it properly today.
	 */
	si = &shared_region->sr_slide_info;
	if (si->slide_info_entry) {
		kmem_free(kernel_map,
			  (vm_offset_t) si->slide_info_entry,
			  (vm_size_t) si->slide_info_size);
		vm_object_deallocate(si->slide_object);
	}
#endif

	/* release the shared region structure... */
	kfree(shared_region, sizeof (*shared_region));

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: destroy(%p) <-\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	shared_region = NULL;
}
/*
 * Gets the address of the first (in time) mapping in the shared region.
 */
kern_return_t
vm_shared_region_start_address(
	vm_shared_region_t	shared_region,
	mach_vm_offset_t	*start_address)
{
	kern_return_t		kr;
	mach_vm_offset_t	sr_base_address;
	mach_vm_offset_t	sr_first_mapping;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> start_address(%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region)));
	assert(shared_region->sr_ref_count > 1);

	vm_shared_region_lock();

	/*
	 * Wait if there's another thread establishing a mapping
	 * in this shared region right when we're looking at it.
	 * We want a consistent view of the map...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		assert(shared_region->sr_ref_count > 1);
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
				       THREAD_UNINT);
	}
	assert(!shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);

	sr_base_address = shared_region->sr_base_address;
	sr_first_mapping = shared_region->sr_first_mapping;

	if (sr_first_mapping == (mach_vm_offset_t) -1) {
		/* shared region is empty */
		kr = KERN_INVALID_ADDRESS;
	} else {
		kr = KERN_SUCCESS;
		*start_address = sr_base_address + sr_first_mapping;
	}

	vm_shared_region_unlock();

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: start_address(%p) <- 0x%llx\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region),
		 (long long)shared_region->sr_base_address));

	return kr;
}
void
vm_shared_region_undo_mappings(
	vm_map_t		sr_map,
	mach_vm_offset_t	sr_base_address,
	struct shared_file_mapping_np	*mappings,
	unsigned int		mappings_count)
{
	unsigned int		j;
	vm_shared_region_t	shared_region = NULL;
	boolean_t		reset_shared_region_state = FALSE;

	shared_region = vm_shared_region_get(current_task());
	if (shared_region == NULL) {
		printf("Failed to undo mappings because of NULL shared region.\n");
		return;
	}

	if (sr_map == NULL) {
		ipc_port_t		sr_handle;
		vm_named_entry_t	sr_mem_entry;

		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);

		while (shared_region->sr_mapping_in_progress) {
			/* wait for our turn... */
			vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
					       THREAD_UNINT);
		}
		assert(!shared_region->sr_mapping_in_progress);
		assert(shared_region->sr_ref_count > 1);
		/* let others know we're working in this shared region */
		shared_region->sr_mapping_in_progress = TRUE;

		vm_shared_region_unlock();

		reset_shared_region_state = TRUE;

		/* no need to lock because this data is never modified... */
		sr_handle = shared_region->sr_mem_entry;
		sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
		sr_map = sr_mem_entry->backing.map;
		sr_base_address = shared_region->sr_base_address;
	}
	/*
	 * Undo the mappings we've established so far.
	 */
	for (j = 0; j < mappings_count; j++) {
		kern_return_t kr2;

		if (mappings[j].sfm_size == 0) {
			/*
			 * We didn't establish this
			 * mapping, so nothing to undo.
			 */
			continue;
		}
		SHARED_REGION_TRACE_INFO(
			("shared_region: mapping[%d]: "
			 "address:0x%016llx "
			 "size:0x%016llx "
			 "offset:0x%016llx "
			 "maxprot:0x%x prot:0x%x: "
			 "undoing...\n",
			 j,
			 (long long)mappings[j].sfm_address,
			 (long long)mappings[j].sfm_size,
			 (long long)mappings[j].sfm_file_offset,
			 mappings[j].sfm_max_prot,
			 mappings[j].sfm_init_prot));
		kr2 = mach_vm_deallocate(
			sr_map,
			(mappings[j].sfm_address -
			 sr_base_address),
			mappings[j].sfm_size);
		assert(kr2 == KERN_SUCCESS);
	}

	if (reset_shared_region_state) {
		vm_shared_region_lock();
		assert(shared_region->sr_ref_count > 1);
		assert(shared_region->sr_mapping_in_progress);
		/* we're done working on that shared region */
		shared_region->sr_mapping_in_progress = FALSE;
		thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
		vm_shared_region_unlock();
		reset_shared_region_state = FALSE;
	}

	vm_shared_region_deallocate(shared_region);
}
/*
 * Establish some mappings of a file in the shared region.
 * This is used by "dyld" via the shared_region_map_np() system call
 * to populate the shared region with the appropriate shared cache.
 *
 * One could also call it several times to incrementally load several
 * libraries, as long as they do not overlap.
 * It will return KERN_SUCCESS if the mappings were successfully established
 * or if they were already established identically by another process.
 */
kern_return_t
vm_shared_region_map_file(
	vm_shared_region_t		shared_region,
	unsigned int			mappings_count,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	void				*root_dir,
	uint32_t			slide,
	user_addr_t			slide_start,
	user_addr_t			slide_size)
{
	kern_return_t		kr;
	vm_object_t		file_object;
	ipc_port_t		sr_handle;
	vm_named_entry_t	sr_mem_entry;
	vm_map_t		sr_map;
	mach_vm_offset_t	sr_base_address;
	unsigned int		i;
	mach_port_t		map_port;
	vm_map_offset_t		target_address;
	vm_object_t		object;
	vm_object_size_t	obj_size;
	struct shared_file_mapping_np	*mapping_to_slide = NULL;
	mach_vm_offset_t	first_mapping = (mach_vm_offset_t) -1;

	kr = KERN_SUCCESS;

	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);

	if (shared_region->sr_root_dir != root_dir) {
		/*
		 * This shared region doesn't match the current root
		 * directory of this process.  Deny the mapping to
		 * avoid tainting the shared region with something that
		 * doesn't quite belong into it.
		 */
		vm_shared_region_unlock();
		kr = KERN_PROTECTION_FAILURE;
		goto done;
	}

	/*
	 * Make sure we handle only one mapping at a time in a given
	 * shared region, to avoid race conditions.  This should not
	 * happen frequently...
	 */
	while (shared_region->sr_mapping_in_progress) {
		/* wait for our turn... */
		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
				       THREAD_UNINT);
	}
	assert(!shared_region->sr_mapping_in_progress);
	assert(shared_region->sr_ref_count > 1);
	/* let others know we're working in this shared region */
	shared_region->sr_mapping_in_progress = TRUE;

	vm_shared_region_unlock();

	/* no need to lock because this data is never modified... */
	sr_handle = shared_region->sr_mem_entry;
	sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
	sr_map = sr_mem_entry->backing.map;
	sr_base_address = shared_region->sr_base_address;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
		 (void *)VM_KERNEL_ADDRPERM(mappings),
		 (void *)VM_KERNEL_ADDRPERM(file_control), file_size));

	/* get the VM object associated with the file to be mapped */
	file_object = memory_object_control_to_vm_object(file_control);

	/* establish the mappings */
	for (i = 0; i < mappings_count; i++) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: mapping[%d]: "
			 "address:0x%016llx size:0x%016llx offset:0x%016llx "
			 "maxprot:0x%x prot:0x%x\n",
			 i,
			 (long long)mappings[i].sfm_address,
			 (long long)mappings[i].sfm_size,
			 (long long)mappings[i].sfm_file_offset,
			 mappings[i].sfm_max_prot,
			 mappings[i].sfm_init_prot));

		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			/* zero-filled memory */
			map_port = MACH_PORT_NULL;
		} else {
			/* file-backed memory */
			__IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager);
		}

		if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) {
			/*
			 * This is the mapping that needs to be slid.
			 */
			if (mapping_to_slide != NULL) {
				SHARED_REGION_TRACE_INFO(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x "
					 "will not be slid as only one such mapping is allowed...\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot));
			} else {
				mapping_to_slide = &mappings[i];
			}
		}

		/* mapping's address is relative to the shared region base */
		target_address =
			mappings[i].sfm_address - sr_base_address;

		/* establish that mapping, OK if it's "already" there */
		if (map_port == MACH_PORT_NULL) {
			/*
			 * We want to map some anonymous memory in a
			 * shared region.
			 * We have to create the VM object now, so that it
			 * can be mapped "copy-on-write".
			 */
			obj_size = vm_map_round_page(mappings[i].sfm_size,
						     VM_MAP_PAGE_MASK(sr_map));
			object = vm_object_allocate(obj_size);
			if (object == VM_OBJECT_NULL) {
				kr = KERN_RESOURCE_SHORTAGE;
			} else {
				kr = vm_map_enter(
					sr_map,
					&target_address,
					vm_map_round_page(mappings[i].sfm_size,
							  VM_MAP_PAGE_MASK(sr_map)),
					0,
					VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
					object,
					0,
					TRUE,
					mappings[i].sfm_init_prot & VM_PROT_ALL,
					mappings[i].sfm_max_prot & VM_PROT_ALL,
					VM_INHERIT_DEFAULT);
			}
		} else {
			object = VM_OBJECT_NULL; /* no anonymous memory here */
			kr = vm_map_enter_mem_object(
				sr_map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size,
						  VM_MAP_PAGE_MASK(sr_map)),
				0,
				VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				mappings[i].sfm_init_prot & VM_PROT_ALL,
				mappings[i].sfm_max_prot & VM_PROT_ALL,
				VM_INHERIT_DEFAULT);
		}

		if (kr == KERN_SUCCESS) {
			/*
			 * Record the first (chronologically) successful
			 * mapping in this shared region.
			 * We're protected by "sr_mapping_in_progress" here,
			 * so no need to lock "shared_region".
			 */
			if (first_mapping == (mach_vm_offset_t) -1) {
				first_mapping = target_address;
			}
		} else {
			if (map_port == MACH_PORT_NULL) {
				/*
				 * Get rid of the VM object we just created
				 * but failed to map.
				 */
				vm_object_deallocate(object);
				object = VM_OBJECT_NULL;
			}
			if (kr == KERN_MEMORY_PRESENT) {
				/*
				 * This exact mapping was already there:
				 * that's fine.
				 */
				SHARED_REGION_TRACE_INFO(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x "
					 "already mapped...\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot));
				/*
				 * We didn't establish this mapping ourselves;
				 * let's reset its size, so that we do not
				 * attempt to undo it if an error occurs later.
				 */
				mappings[i].sfm_size = 0;
				kr = KERN_SUCCESS;
			} else {
				/* this mapping failed ! */
				SHARED_REGION_TRACE_ERROR(
					("shared_region: mapping[%d]: "
					 "address:0x%016llx size:0x%016llx "
					 "offset:0x%016llx "
					 "maxprot:0x%x prot:0x%x failed 0x%x\n",
					 i,
					 (long long)mappings[i].sfm_address,
					 (long long)mappings[i].sfm_size,
					 (long long)mappings[i].sfm_file_offset,
					 mappings[i].sfm_max_prot,
					 mappings[i].sfm_init_prot,
					 kr));

				vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i);
				break;
			}
		}
	}

	if (kr == KERN_SUCCESS &&
	    slide &&
	    mapping_to_slide != NULL) {
		kr = vm_shared_region_slide(slide,
					    mapping_to_slide->sfm_file_offset,
					    mapping_to_slide->sfm_size,
					    slide_start,
					    slide_size,
					    file_control);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: region_slide("
				 "slide:0x%x start:0x%016llx "
				 "size:0x%016llx) failed 0x%x\n",
				 slide,
				 (long long)slide_start,
				 (long long)slide_size,
				 kr));
			vm_shared_region_undo_mappings(sr_map,
						       sr_base_address,
						       mappings,
						       mappings_count);
		}
	}

	vm_shared_region_lock();
	assert(shared_region->sr_ref_count > 1);
	assert(shared_region->sr_mapping_in_progress);
	/* set "sr_first_mapping"; dyld uses it to validate the shared cache */
	if (kr == KERN_SUCCESS &&
	    shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
		shared_region->sr_first_mapping = first_mapping;
	}
	/* we're done working on that shared region */
	shared_region->sr_mapping_in_progress = FALSE;
	thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
	vm_shared_region_unlock();

done:
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n",
		 (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
		 (void *)VM_KERNEL_ADDRPERM(mappings),
		 (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr));
	return kr;
}
/*
 * Enter the appropriate shared region into "map" for "task".
 * This involves looking up the shared region (and possibly creating a new
 * one) for the desired environment, then mapping the VM sub map into the
 * task's VM "map", with the appropriate level of pmap-nesting.
 */
kern_return_t
vm_shared_region_enter(
	struct _vm_map		*map,
	struct task		*task,
	void			*fsroot,
	cpu_type_t		cpu)
{
	kern_return_t		kr;
	vm_shared_region_t	shared_region;
	vm_map_offset_t		sr_address, sr_offset, target_address;
	vm_map_size_t		sr_size, mapping_size;
	vm_map_offset_t		sr_pmap_nesting_start;
	vm_map_size_t		sr_pmap_nesting_size;
	ipc_port_t		sr_handle;
	boolean_t		is_64bit;

	is_64bit = task_has_64BitAddr(task);

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: -> "
		 "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d)\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit));

	/* lookup (create if needed) the shared region for this environment */
	shared_region = vm_shared_region_lookup(fsroot, cpu, is_64bit);
	if (shared_region == NULL) {
		/* this should not happen ! */
		SHARED_REGION_TRACE_ERROR(
			("shared_region: -> "
			 "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d): "
			 "lookup failed !\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit));
		//panic("shared_region_enter: lookup failed\n");
		return KERN_FAILURE;
	}

	/* let the task use that shared region */
	vm_shared_region_set(task, shared_region);

	/* no need to lock since this data is never modified */
	sr_address = shared_region->sr_base_address;
	sr_size = shared_region->sr_size;
	sr_handle = shared_region->sr_mem_entry;
	sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start;
	sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size;

	/*
	 * Start mapping the shared region's VM sub map into the task's VM map.
	 */
	sr_offset = 0;

	if (sr_pmap_nesting_start > sr_address) {
		/* we need to map a range without pmap-nesting first */
		target_address = sr_address;
		mapping_size = sr_pmap_nesting_start - sr_address;
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			sr_handle,
			sr_offset,
			TRUE,
			VM_PROT_READ,
			VM_PROT_ALL,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d): "
			 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
		sr_offset += mapping_size;
		sr_size -= mapping_size;
	}
	/*
	 * We may need to map several pmap-nested portions, due to platform
	 * specific restrictions on pmap nesting.
	 * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias...
	 */
	for (;
	     sr_pmap_nesting_size > 0;
	     sr_offset += mapping_size,
	     sr_size -= mapping_size,
	     sr_pmap_nesting_size -= mapping_size) {
		target_address = sr_address + sr_offset;
		mapping_size = sr_pmap_nesting_size;
		if (mapping_size > pmap_nesting_size_max) {
			mapping_size = (vm_map_offset_t) pmap_nesting_size_max;
		}
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			(VM_FLAGS_FIXED | VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP)),
			sr_handle,
			sr_offset,
			TRUE,
			VM_PROT_READ,
			VM_PROT_ALL,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d): "
			 "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
	}
	if (sr_size > 0) {
		/* and there's some left to be mapped without pmap-nesting */
		target_address = sr_address + sr_offset;
		mapping_size = sr_size;
		kr = vm_map_enter_mem_object(
			map,
			&target_address,
			mapping_size,
			0,
			VM_FLAGS_FIXED,
			sr_handle,
			sr_offset,
			TRUE,
			VM_PROT_READ,
			VM_PROT_ALL,
			VM_INHERIT_SHARE);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("shared_region: enter(%p,%p,%p,%d,%d): "
				 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (void *)VM_KERNEL_ADDRPERM(task),
				 (void *)VM_KERNEL_ADDRPERM(fsroot),
				 cpu, is_64bit,
				 (long long)target_address,
				 (long long)mapping_size,
				 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
			goto done;
		}
		SHARED_REGION_TRACE_DEBUG(
			("shared_region: enter(%p,%p,%p,%d,%d): "
			 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (void *)VM_KERNEL_ADDRPERM(task),
			 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
			 (long long)target_address, (long long)mapping_size,
			 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
		sr_offset += mapping_size;
		sr_size -= mapping_size;
	}
	assert(sr_size == 0);

done:
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: enter(%p,%p,%p,%d,%d) <- 0x%x\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit, kr));
	return kr;
}
#define SANE_SLIDE_INFO_SIZE		(2048*1024) /* can be changed if needed */
struct vm_shared_region_slide_info	slide_info;

kern_return_t
vm_shared_region_sliding_valid(uint32_t slide)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_shared_region_t sr = vm_shared_region_get(current_task());

	/* No region yet? we're fine. */
	if (sr == NULL) {
		return kr;
	}

	if ((sr->sr_slid == TRUE) && slide) {
		if (slide != vm_shared_region_get_slide_info(sr)->slide) {
			printf("Only one shared region can be slid\n");
			kr = KERN_FAILURE;
		} else {
			/*
			 * Request for sliding when we've
			 * already done it with exactly the
			 * same slide value before.
			 * This isn't wrong technically but
			 * we don't want to slide again and
			 * so we return this value.
			 */
			kr = KERN_INVALID_ARGUMENT;
		}
	}
	vm_shared_region_deallocate(sr);
	return kr;
}
kern_return_t
vm_shared_region_slide_init(
		vm_shared_region_t	sr,
		mach_vm_size_t		slide_info_size,
		mach_vm_offset_t	start,
		mach_vm_size_t		size,
		uint32_t		slide,
		memory_object_control_t	sr_file_control)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_object_t object = VM_OBJECT_NULL;
	vm_object_offset_t offset = 0;
	vm_shared_region_slide_info_t si = vm_shared_region_get_slide_info(sr);
	vm_offset_t slide_info_entry;

	vm_map_t map = NULL, cur_map = NULL;
	boolean_t is_map_locked = FALSE;

	assert(sr->sr_slide_in_progress);
	assert(!sr->sr_slid);
	assert(si->slide_object == NULL);
	assert(si->slide_info_entry == NULL);

	if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
		printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
		kr = KERN_FAILURE;
		return kr;
	}

	kr = kmem_alloc(kernel_map,
			(vm_offset_t *) &slide_info_entry,
			(vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (sr_file_control != MEMORY_OBJECT_CONTROL_NULL) {

		object = memory_object_control_to_vm_object(sr_file_control);
		vm_object_reference(object);
		offset = start;

		vm_object_lock(object);
	} else {
		/*
		 * Remove this entire "else" block and all "map" references
		 * once we get rid of the shared_region_slide_np()
		 * system call.
		 */
		vm_map_entry_t entry = VM_MAP_ENTRY_NULL;
		map = current_map();
		vm_map_lock_read(map);
		is_map_locked = TRUE;
	Retry:
		cur_map = map;
		if(!vm_map_lookup_entry(map, start, &entry)) {
			kr = KERN_INVALID_ARGUMENT;
		} else {
			vm_object_t shadow_obj = VM_OBJECT_NULL;

			if (entry->is_sub_map == TRUE) {
				map = VME_SUBMAP(entry);
				start -= entry->vme_start;
				start += VME_OFFSET(entry);
				vm_map_lock_read(map);
				vm_map_unlock_read(cur_map);
				goto Retry;
			} else {
				object = VME_OBJECT(entry);
				offset = ((start - entry->vme_start) +
					  VME_OFFSET(entry));
			}

			vm_object_lock(object);
			while (object->shadow != VM_OBJECT_NULL) {
				shadow_obj = object->shadow;
				vm_object_lock(shadow_obj);
				vm_object_unlock(object);
				object = shadow_obj;
			}
		}
	}

	if (object->internal == TRUE) {
		kr = KERN_INVALID_ADDRESS;
	} else if (object->object_slid) {
		/* Can only be slid once */
		printf("%s: found vm_object %p already slid?\n", __FUNCTION__, object);
		kr = KERN_FAILURE;
	} else {

		si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry;
		si->slide_info_size = slide_info_size;
		si->slide_object = object;
		si->start = offset;
		si->end = si->start + size;
		si->slide = slide;

		/*
		 * If we want to have this region get deallocated/freed
		 * then we will have to make sure that we msync(..MS_INVALIDATE..)
		 * the pages associated with this shared region. Those pages would
		 * have been slid with an older slide value.
		 */

		/*
		 * Pointers in object are held without references; they
		 * are disconnected at the time that we destroy the
		 * shared region, and since the shared region holds
		 * a reference on the object, no references in the other
		 * direction are required.
		 */
		object->object_slid = TRUE;
		object->vo_slide_info = si;
	}

	vm_object_unlock(object);
	if (is_map_locked == TRUE) {
		vm_map_unlock_read(map);
	}

	if (kr != KERN_SUCCESS) {
		kmem_free(kernel_map, slide_info_entry, slide_info_size);
	}
	return kr;
}
void*
vm_shared_region_get_slide_info_entry(vm_shared_region_t sr) {
	return (void*)sr->sr_slide_info.slide_info_entry;
}
kern_return_t
vm_shared_region_slide_sanity_check(vm_shared_region_t sr)
{
	uint32_t pageIndex=0;
	uint16_t entryIndex=0;
	uint16_t *toc = NULL;
	vm_shared_region_slide_info_t si;
	vm_shared_region_slide_info_entry_t s_info;
	kern_return_t kr;

	si = vm_shared_region_get_slide_info(sr);
	s_info = si->slide_info_entry;
	toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);

	kr = mach_vm_protect(kernel_map,
			     (mach_vm_offset_t)(vm_offset_t)s_info,
			     (mach_vm_size_t) si->slide_info_size,
			     TRUE, VM_PROT_READ);
	if (kr != KERN_SUCCESS) {
		panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr);
	}

	for (;pageIndex < s_info->toc_count; pageIndex++) {

		entryIndex = (uint16_t)(toc[pageIndex]);

		if (entryIndex >= s_info->entry_count) {
			printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
			goto fail;
		}
	}
	return KERN_SUCCESS;
fail:
	if (si->slide_info_entry != NULL) {
		kmem_free(kernel_map,
			  (vm_offset_t) si->slide_info_entry,
			  (vm_size_t) si->slide_info_size);

		vm_object_lock(si->slide_object);
		si->slide_object->object_slid = FALSE;
		si->slide_object->vo_slide_info = NULL;
		vm_object_unlock(si->slide_object);

		vm_object_deallocate(si->slide_object);
		si->slide_object = NULL;
		si->slide_info_entry = NULL;
		si->slide_info_size = 0;
	}
	return KERN_FAILURE;
}
kern_return_t
vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
{
	uint16_t *toc = NULL;
	slide_info_entry_toc_t bitmap = NULL;
	uint32_t i = 0, j = 0;
	uint8_t b = 0;
	uint32_t slide = si->slide;
	int is_64 = task_has_64BitAddr(current_task());

	vm_shared_region_slide_info_entry_t s_info = si->slide_info_entry;
	toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);

	if (pageIndex >= s_info->toc_count) {
		printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count);
	} else {
		uint16_t entryIndex = (uint16_t)(toc[pageIndex]);
		slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset);

		if (entryIndex >= s_info->entry_count) {
			printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count);
		} else {
			bitmap = &slide_info_entries[entryIndex];

			for(i=0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) {
				b = bitmap->entry[i];
				if (b != 0) {
					for (j=0; j <8; ++j) {
						if (b & (1 << j)) {
							uint32_t *ptr_to_slide;
							uint32_t old_value;

							ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr)+(sizeof(uint32_t)*(i*8 +j)));
							old_value = *ptr_to_slide;
							*ptr_to_slide += slide;
							if (is_64 && *ptr_to_slide < old_value) {
								/*
								 * We just slid the low 32 bits of a 64-bit pointer
								 * and it looks like there should have been a carry-over
								 * to the upper 32 bits.
								 * The sliding failed...
								 */
								printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n",
								       i, j, b, slide, old_value, *ptr_to_slide);
								return KERN_FAILURE;
							}
						}
					}
				}
			}
		}
	}

	return KERN_SUCCESS;
}
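/*
 * Illustrative example (not from the original sources) of the bitmap
 * decoding done above.  Each page's entry holds NUM_SLIDING_BITMAPS_PER_PAGE
 * bytes; bit j of byte i marks the 32-bit word at byte offset
 * 4 * (i * 8 + j) within the page as a pointer that must be rebased:
 *
 *	b = 0x05 at i = 2  ->  bits 0 and 2 set
 *	    word offsets:  4 * (2*8 + 0) = 64  and  4 * (2*8 + 2) = 72
 *	    *(uint32_t *)(vaddr + 64) += slide;
 *	    *(uint32_t *)(vaddr + 72) += slide;
 *
 * For 64-bit processes only the low 32 bits are slid here, which is why a
 * wrap-around (new value smaller than the old one) is treated as an error.
 */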
/******************************************************************************/
/* Comm page support                                                          */
/******************************************************************************/

ipc_port_t commpage32_handle = IPC_PORT_NULL;
ipc_port_t commpage64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage32_entry = NULL;
vm_named_entry_t commpage64_entry = NULL;
vm_map_t commpage32_map = VM_MAP_NULL;
vm_map_t commpage64_map = VM_MAP_NULL;

ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage_text32_entry = NULL;
vm_named_entry_t commpage_text64_entry = NULL;
vm_map_t commpage_text32_map = VM_MAP_NULL;
vm_map_t commpage_text64_map = VM_MAP_NULL;

user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START;
user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START;
#if defined(__i386__) || defined(__x86_64__)
/*
 * Create a memory entry, VM submap and pmap for one commpage.
 */
static void
_vm_commpage_init(
	ipc_port_t	*handlep,
	vm_map_size_t	size)
{
	kern_return_t		kr;
	vm_named_entry_t	mem_entry;
	vm_map_t		new_map;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> _init(0x%llx)\n",
		 (long long)size));

	kr = mach_memory_entry_allocate(&mem_entry,
					handlep);
	if (kr != KERN_SUCCESS) {
		panic("_vm_commpage_init: could not allocate mem_entry");
	}
	new_map = vm_map_create(pmap_create(NULL, 0, 0), 0, size, TRUE);
	if (new_map == VM_MAP_NULL) {
		panic("_vm_commpage_init: could not allocate VM map");
	}
	mem_entry->backing.map = new_map;
	mem_entry->internal = TRUE;
	mem_entry->is_sub_map = TRUE;
	mem_entry->offset = 0;
	mem_entry->protection = VM_PROT_ALL;
	mem_entry->size = size;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: _init(0x%llx) <- %p\n",
		 (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep)));
}
#endif /* __i386__ || __x86_64__ */
/*
 * Initialize the comm text pages at boot time.
 */
extern u_int32_t random(void);
void
vm_commpage_text_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: ->init()\n"));
#if defined(__i386__) || defined(__x86_64__)
	/* create the 32-bit comm text page */
	unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restricting to 32bMAX-2PAGE */
	_vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject;
	commpage_text32_map = commpage_text32_entry->backing.map;
	commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm page */
	offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding up to a 2MB range */
	_vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
	commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject;
	commpage_text64_map = commpage_text64_entry->backing.map;
	commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);

	commpage_text_populate();
#else
#error Unknown architecture.
#endif /* __i386__ || __x86_64__ */
	/* populate the routines in here */
	SHARED_REGION_TRACE_DEBUG(
		("commpage text: init() <-\n"));
}
/*
 * Initialize the comm pages at boot time.
 */
void
vm_commpage_init(void)
{
	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> init()\n"));

#if defined(__i386__) || defined(__x86_64__)
	/* create the 32-bit comm page */
	_vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
	commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
	commpage32_map = commpage32_entry->backing.map;

	/* XXX if (cpu_is_64bit_capable()) ? */
	/* create the 64-bit comm page */
	_vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
	commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
	commpage64_map = commpage64_entry->backing.map;

#endif /* __i386__ || __x86_64__ */

	/* populate them according to this specific platform */
	commpage_populate();
	__commpage_setup = 1;
#if defined(__i386__) || defined(__x86_64__)
	if (__system_power_source == 0) {
		post_sys_powersource_internal(0, 1);
	}
#endif /* __i386__ || __x86_64__ */

	SHARED_REGION_TRACE_DEBUG(
		("commpage: init() <-\n"));
}
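/*
 * Illustrative example (not from the original sources): user-space code
 * reads commpage data directly from its fixed address, with no system
 * call.  For instance, on x86_64 a process could read the number of
 * active CPUs roughly like this (field and offset taken from
 * cpu_capabilities.h):
 *
 *	uint8_t ncpus = *(volatile uint8_t *)_COMM_PAGE_ACTIVE_CPUS;
 *
 * The kernel keeps such fields up to date through commpage_populate()
 * and friends; applications only ever read them.
 */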
/*
 * Enter the appropriate comm page into the task's address space.
 * This is called at exec() time via vm_map_exec().
 */
kern_return_t
vm_commpage_enter(
	vm_map_t	map,
	task_t		task)
{
	ipc_port_t		commpage_handle, commpage_text_handle;
	vm_map_offset_t		commpage_address, objc_address, commpage_text_address;
	vm_map_size_t		commpage_size, objc_size, commpage_text_size;
	int			vm_flags;
	kern_return_t		kr;

	SHARED_REGION_TRACE_DEBUG(
		("commpage: -> enter(%p,%p)\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task)));

	commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
	/* the comm page is likely to be beyond the actual end of the VM map */
	vm_flags = VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX;

	/* select the appropriate comm page for this task */
	assert(!(task_has_64BitAddr(task) ^ vm_map_is_64bit(map)));
	if (task_has_64BitAddr(task)) {
		commpage_handle = commpage64_handle;
		commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
		commpage_size = _COMM_PAGE64_AREA_LENGTH;
		objc_size = _COMM_PAGE64_OBJC_SIZE;
		objc_address = _COMM_PAGE64_OBJC_BASE;
		commpage_text_handle = commpage_text64_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text64_location;
	} else {
		commpage_handle = commpage32_handle;
		commpage_address =
			(vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
		commpage_size = _COMM_PAGE32_AREA_LENGTH;
		objc_size = _COMM_PAGE32_OBJC_SIZE;
		objc_address = _COMM_PAGE32_OBJC_BASE;
		commpage_text_handle = commpage_text32_handle;
		commpage_text_address = (vm_map_offset_t) commpage_text32_location;
	}

	if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
	    (commpage_size & (pmap_nesting_size_min - 1)) == 0) {
		/* the commpage is properly aligned or sized for pmap-nesting */
		vm_flags |= VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP);
	}
	/* map the comm page in the task's address space */
	assert(commpage_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_address,
		commpage_size,
		0,
		vm_flags,
		commpage_handle,
		0,
		FALSE,
		VM_PROT_READ,
		VM_PROT_READ,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage: enter(%p,0x%llx,0x%llx) "
			 "commpage %p mapping failed 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (long long)commpage_address,
			 (long long)commpage_size,
			 (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr));
	}

	/* map the comm text page in the task's address space */
	assert(commpage_text_handle != IPC_PORT_NULL);
	kr = vm_map_enter_mem_object(
		map,
		&commpage_text_address,
		commpage_text_size,
		0,
		vm_flags,
		commpage_text_handle,
		0,
		FALSE,
		VM_PROT_READ|VM_PROT_EXECUTE,
		VM_PROT_READ|VM_PROT_EXECUTE,
		VM_INHERIT_SHARE);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("commpage text: enter(%p,0x%llx,0x%llx) "
			 "commpage text %p mapping failed 0x%x\n",
			 (void *)VM_KERNEL_ADDRPERM(map),
			 (long long)commpage_text_address,
			 (long long)commpage_text_size,
			 (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr));
	}

	/*
	 * Since we're here, we also pre-allocate some virtual space for the
	 * Objective-C run-time, if needed...
	 */
	if (objc_size != 0) {
		kr = vm_map_enter_mem_object(
			map,
			&objc_address,
			objc_size,
			0,
			VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX,
			IPC_PORT_NULL,
			0,
			FALSE,
			VM_PROT_ALL,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS) {
			SHARED_REGION_TRACE_ERROR(
				("commpage: enter(%p,0x%llx,0x%llx) "
				 "objc mapping failed 0x%x\n",
				 (void *)VM_KERNEL_ADDRPERM(map),
				 (long long)objc_address,
				 (long long)objc_size, kr));
		}
	}

	SHARED_REGION_TRACE_DEBUG(
		("commpage: enter(%p,%p) <- 0x%x\n",
		 (void *)VM_KERNEL_ADDRPERM(map),
		 (void *)VM_KERNEL_ADDRPERM(task), kr));
	return kr;
}
kern_return_t
vm_shared_region_slide(uint32_t slide,
	mach_vm_offset_t	entry_start_address,
	mach_vm_size_t		entry_size,
	mach_vm_offset_t	slide_start,
	mach_vm_size_t		slide_size,
	memory_object_control_t	sr_file_control)
{
	void *slide_info_entry = NULL;
	int error;
	vm_shared_region_t sr;

	SHARED_REGION_TRACE_DEBUG(
		("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
		 slide, entry_start_address, entry_size, slide_start, slide_size));

	sr = vm_shared_region_get(current_task());
	if (sr == NULL) {
		printf("%s: no shared region?\n", __FUNCTION__);
		SHARED_REGION_TRACE_DEBUG(
			("vm_shared_region_slide: <- %d (no shared region)\n",
			 KERN_FAILURE));
		return KERN_FAILURE;
	}

	/*
	 * Protect from concurrent access.
	 */
	vm_shared_region_lock();
	while(sr->sr_slide_in_progress) {
		vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
	}
	if (sr->sr_slid
	    || shared_region_completed_slide
	    ) {
		vm_shared_region_unlock();

		vm_shared_region_deallocate(sr);
		printf("%s: shared region already slid?\n", __FUNCTION__);
		SHARED_REGION_TRACE_DEBUG(
			("vm_shared_region_slide: <- %d (already slid)\n",
			 KERN_FAILURE));
		return KERN_FAILURE;
	}

	sr->sr_slide_in_progress = TRUE;
	vm_shared_region_unlock();

	if((error = vm_shared_region_slide_init(sr, slide_size, entry_start_address, entry_size, slide, sr_file_control))) {
		printf("slide_info initialization failed with kr=%d\n", error);
		goto done;
	}

	slide_info_entry = vm_shared_region_get_slide_info_entry(sr);
	if (slide_info_entry == NULL){
		error = KERN_FAILURE;
	} else {
		error = copyin((user_addr_t)slide_start,
			       slide_info_entry,
			       (vm_size_t)slide_size);
		if (error) {
			error = KERN_INVALID_ADDRESS;
		}
	}
	if (error) {
		goto done;
	}

	if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) {
		error = KERN_INVALID_ARGUMENT;
		printf("Sanity Check failed for slide_info\n");
	} else {
		printf("Successfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n",
		       (void*)(uintptr_t)entry_start_address,
		       (unsigned long)entry_size,
		       (unsigned long)slide_size);
	}

done:
	vm_shared_region_lock();

	assert(sr->sr_slide_in_progress);
	assert(sr->sr_slid == FALSE);
	sr->sr_slide_in_progress = FALSE;
	thread_wakeup(&sr->sr_slide_in_progress);

	if (error == KERN_SUCCESS) {
		sr->sr_slid = TRUE;

		/*
		 * We don't know how to tear down a slid shared region today, because
		 * we would have to invalidate all the pages that have been slid
		 * atomically with respect to anyone mapping the shared region afresh.
		 * Therefore, take a dangling reference to prevent teardown.
		 */
		sr->sr_ref_count++;
		shared_region_completed_slide = TRUE;
	}
	vm_shared_region_unlock();

	vm_shared_region_deallocate(sr);

	SHARED_REGION_TRACE_DEBUG(
		("vm_shared_region_slide: <- %d\n",
		 error));

	return error;
}
/*
 * This is called from powermanagement code to let kernel know the current source of power.
 * 0 if it is external source (connected to power)
 * 1 if it is internal power source ie battery
 */
void
#if defined(__i386__) || defined(__x86_64__)
post_sys_powersource(int i)
#else
post_sys_powersource(__unused int i)
#endif
{
#if defined(__i386__) || defined(__x86_64__)
	post_sys_powersource_internal(i, 0);
#endif /* __i386__ || __x86_64__ */
}


#if defined(__i386__) || defined(__x86_64__)
static void
post_sys_powersource_internal(int i, int internal)
{
	if (internal == 0)
		__system_power_source = i;

	if (__commpage_setup != 0) {
		if (__system_power_source != 0)
			commpage_set_spin_count(0);
		else
			commpage_set_spin_count(MP_SPIN_TRIES);
	}
}
#endif /* __i386__ || __x86_64__ */