[apple/xnu.git] osfmk/vm/vm_shared_region.c (xnu-4903.270.47)
1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /*
25 * Shared region (... and comm page)
26 *
27 * This file handles the VM shared region and comm page.
28 *
29 */
30 /*
31 * SHARED REGIONS
32 * --------------
33 *
34 * A shared region is a submap that contains the most common system shared
35 * libraries for a given environment.
36 * An environment is defined by (cpu-type, 64-bitness, root directory).
37 *
38 * The point of a shared region is to reduce the setup overhead when exec'ing
39 * a new process.
40 * A shared region uses a shared VM submap that gets mapped automatically
41 * at exec() time (see vm_map_exec()). The first process of a given
42 * environment sets up the shared region and all further processes in that
43 * environment can re-use that shared region without having to re-create
44 * the same mappings in their VM map. Everything they need is contained in the shared
45 * region.
46 * It can also share a pmap (mostly for read-only parts but also for the
47 * initial version of some writable parts), which gets "nested" into the
48 * process's pmap. This reduces the number of soft faults: once one process
49 * brings in a page in the shared region, all the other processes can access
50 * it without having to enter it in their own pmap.
51 *
52 *
53 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
54 * to map the appropriate shared region in the process's address space.
55 * We look up the appropriate shared region for the process's environment.
56 * If we can't find one, we create a new (empty) one and add it to the list.
57 * Otherwise, we just take an extra reference on the shared region we found.
58 *
59 * The "dyld" runtime (mapped into the process's address space at exec() time)
60 * will then use the shared_region_check_np() and shared_region_map_np()
61 * system calls to validate and/or populate the shared region with the
62 * appropriate dyld_shared_cache file.
63 *
64 * The shared region is inherited on fork() and the child simply takes an
65 * extra reference on its parent's shared region.
66 *
67 * When the task terminates, we release a reference on its shared region.
68 * When the last reference is released, we destroy the shared region.
69 *
70 * After a chroot(), the calling process keeps using its original shared region,
71 * since that's what was mapped when it was started. But its children
72 * will use a different shared region, because they need to use the shared
73 * cache that's relative to the new root directory.
74 */
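/*
 * Illustrative sketch (not part of this file): after exec(), dyld asks the
 * kernel whether a shared cache is already mapped in this shared region and,
 * if so, where its first mapping starts.  The userspace stub name and exact
 * signature below are assumptions based on the shared_region_check_np()
 * system call this file supports; the kernel entry point lives in
 * bsd/vm/vm_unix.c.
 *
 *	uint64_t cache_base = 0;
 *	if (__shared_region_check_np(&cache_base) == 0) {
 *		// a shared cache is already mapped: validate its header at cache_base
 *	} else {
 *		// no cache yet: open the dyld_shared_cache file and populate the
 *		// region via the shared_region_map_np() family of system calls
 *	}
 */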
75 /*
76 * COMM PAGE
77 *
78 * A "comm page" is an area of memory that is populated by the kernel with
79 * the appropriate platform-specific version of some commonly used code.
80 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
81 * for the native cpu-type. There is no need to over-optimize translated code
82 * for hardware that is not really there!
83 *
84 * The comm pages are created and populated at boot time.
85 *
86 * The appropriate comm page is mapped into a process's address space
87 * at exec() time, in vm_map_exec().
88 * It is then inherited on fork().
89 *
90 * The comm page is shared between the kernel and all applications of
91 * a given platform. Only the kernel can modify it.
92 *
93 * Applications just branch to fixed addresses in the comm page and find
94 * the right version of the code for the platform. There is also some
95 * data provided and updated by the kernel for processes to retrieve easily
96 * without having to do a system call.
97 */
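/*
 * Illustrative sketch (not part of this file): user code reads comm page data
 * directly at the fixed addresses published in <machine/cpu_capabilities.h>
 * (the same header included below).  The symbols used here assume the x86_64
 * layout of that header.
 *
 *	uint32_t caps = *(volatile uint32_t *)_COMM_PAGE_CPU_CAPABILITIES;
 *	if (caps & kHasAVX1_0) {
 *		// pick the AVX code path
 *	}
 */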
98
99 #include <debug.h>
100
101 #include <kern/ipc_tt.h>
102 #include <kern/kalloc.h>
103 #include <kern/thread_call.h>
104
105 #include <mach/mach_vm.h>
106
107 #include <vm/vm_map.h>
108 #include <vm/vm_shared_region.h>
109
110 #include <vm/vm_protos.h>
111
112 #include <machine/commpage.h>
113 #include <machine/cpu_capabilities.h>
114
115 #if defined (__arm__) || defined(__arm64__)
116 #include <arm/cpu_data_internal.h>
117 #endif
118
119 /*
120 * the following codes are used in the subclass
121 * of the DBG_MACH_SHAREDREGION class
122 */
123 #define PROCESS_SHARED_CACHE_LAYOUT 0x00
124
125
126 /* "dyld" uses this to figure out what the kernel supports */
127 int shared_region_version = 3;
128
129 /* trace level, output is sent to the system log file */
130 int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;
131
132 /* should local (non-chroot) shared regions persist when no task uses them? */
133 int shared_region_persistence = 0; /* no by default */
134
135 /* delay before reclaiming an unused shared region */
136 int shared_region_destroy_delay = 120; /* in seconds */
137
138 struct vm_shared_region *init_task_shared_region = NULL;
139
140 #ifndef CONFIG_EMBEDDED
141 /*
142 * Only one cache gets to slide on Desktop, since we can't
143 * tear down slide info properly today and the desktop actually
144 * produces lots of shared caches.
145 */
146 boolean_t shared_region_completed_slide = FALSE;
147 #endif
148
149 /* this lock protects all the shared region data structures */
150 lck_grp_t *vm_shared_region_lck_grp;
151 lck_mtx_t vm_shared_region_lock;
152
153 #define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
154 #define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
155 #define vm_shared_region_sleep(event, interruptible) \
156 lck_mtx_sleep(&vm_shared_region_lock, \
157 LCK_SLEEP_DEFAULT, \
158 (event_t) (event), \
159 (interruptible))
160
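/*
 * Summary of how the lock/sleep pair above is used: long operations on a
 * shared region (e.g. vm_shared_region_map_file()) serialize on the region's
 * "sr_mapping_in_progress" flag instead of holding the global lock for the
 * whole operation:
 *
 *	vm_shared_region_lock();
 *	while (shared_region->sr_mapping_in_progress) {
 *		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
 *		    THREAD_UNINT);
 *	}
 *	shared_region->sr_mapping_in_progress = TRUE;
 *	vm_shared_region_unlock();
 *	... do the long work without the lock held ...
 *	vm_shared_region_lock();
 *	shared_region->sr_mapping_in_progress = FALSE;
 *	thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
 *	vm_shared_region_unlock();
 */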
161 /* the list of currently available shared regions (one per environment) */
162 queue_head_t vm_shared_region_queue;
163
164 static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
165 static vm_shared_region_t vm_shared_region_create(
166 void *root_dir,
167 cpu_type_t cputype,
168 cpu_subtype_t cpu_subtype,
169 boolean_t is_64bit);
170 static void vm_shared_region_destroy(vm_shared_region_t shared_region);
171
172 static void vm_shared_region_timeout(thread_call_param_t param0,
173 thread_call_param_t param1);
174 kern_return_t vm_shared_region_slide_mapping(
175 vm_shared_region_t sr,
176 mach_vm_size_t slide_info_size,
177 mach_vm_offset_t start,
178 mach_vm_size_t size,
179 mach_vm_offset_t slid_mapping,
180 uint32_t slide,
181 memory_object_control_t); /* forward */
182
183 static int __commpage_setup = 0;
184 #if defined(__i386__) || defined(__x86_64__)
185 static int __system_power_source = 1; /* init to external power source */
186 static void post_sys_powersource_internal(int i, int internal);
187 #endif /* __i386__ || __x86_64__ */
188
189
190 /*
191 * Initialize the module...
192 */
193 void
194 vm_shared_region_init(void)
195 {
196 SHARED_REGION_TRACE_DEBUG(
197 ("shared_region: -> init\n"));
198
199 vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region",
200 LCK_GRP_ATTR_NULL);
201 lck_mtx_init(&vm_shared_region_lock,
202 vm_shared_region_lck_grp,
203 LCK_ATTR_NULL);
204
205 queue_init(&vm_shared_region_queue);
206
207 SHARED_REGION_TRACE_DEBUG(
208 ("shared_region: <- init\n"));
209 }
210
211 /*
212 * Retrieve a task's shared region and grab an extra reference to
213 * make sure it doesn't disappear while the caller is using it.
214 * The caller is responsible for consuming that extra reference if
215 * necessary.
216 */
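/*
 * Sketch of the expected calling pattern: the extra reference returned here
 * must eventually be dropped with vm_shared_region_deallocate().
 *
 *	vm_shared_region_t sr = vm_shared_region_get(task);
 *	if (sr != NULL) {
 *		mach_vm_offset_t base = vm_shared_region_base_address(sr);
 *		... use "sr" and "base" ...
 *		vm_shared_region_deallocate(sr);
 *	}
 */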
217 vm_shared_region_t
218 vm_shared_region_get(
219 task_t task)
220 {
221 vm_shared_region_t shared_region;
222
223 SHARED_REGION_TRACE_DEBUG(
224 ("shared_region: -> get(%p)\n",
225 (void *)VM_KERNEL_ADDRPERM(task)));
226
227 task_lock(task);
228 vm_shared_region_lock();
229 shared_region = task->shared_region;
230 if (shared_region) {
231 assert(shared_region->sr_ref_count > 0);
232 vm_shared_region_reference_locked(shared_region);
233 }
234 vm_shared_region_unlock();
235 task_unlock(task);
236
237 SHARED_REGION_TRACE_DEBUG(
238 ("shared_region: get(%p) <- %p\n",
239 (void *)VM_KERNEL_ADDRPERM(task),
240 (void *)VM_KERNEL_ADDRPERM(shared_region)));
241
242 return shared_region;
243 }
244
245 /*
246 * Get the base address of the shared region.
247 * That's the address at which it needs to be mapped in the process's address
248 * space.
249 * No need to lock since this data is set when the shared region is
250 * created and is never modified after that. The caller must hold an extra
251 * reference on the shared region to prevent it from being destroyed.
252 */
253 mach_vm_offset_t
254 vm_shared_region_base_address(
255 vm_shared_region_t shared_region)
256 {
257 SHARED_REGION_TRACE_DEBUG(
258 ("shared_region: -> base_address(%p)\n",
259 (void *)VM_KERNEL_ADDRPERM(shared_region)));
260 assert(shared_region->sr_ref_count > 1);
261 SHARED_REGION_TRACE_DEBUG(
262 ("shared_region: base_address(%p) <- 0x%llx\n",
263 (void *)VM_KERNEL_ADDRPERM(shared_region),
264 (long long)shared_region->sr_base_address));
265 return shared_region->sr_base_address;
266 }
267
268 /*
269 * Get the size of the shared region.
270 * That's the size that needs to be mapped in the process's address
271 * space.
272 * No need to lock since this data is set when the shared region is
273 * created and is never modified after that. The caller must hold an extra
274 * reference on the shared region to prevent it from being destroyed.
275 */
276 mach_vm_size_t
277 vm_shared_region_size(
278 vm_shared_region_t shared_region)
279 {
280 SHARED_REGION_TRACE_DEBUG(
281 ("shared_region: -> size(%p)\n",
282 (void *)VM_KERNEL_ADDRPERM(shared_region)));
283 assert(shared_region->sr_ref_count > 1);
284 SHARED_REGION_TRACE_DEBUG(
285 ("shared_region: size(%p) <- 0x%llx\n",
286 (void *)VM_KERNEL_ADDRPERM(shared_region),
287 (long long)shared_region->sr_size));
288 return shared_region->sr_size;
289 }
290
291 /*
292 * Get the memory entry of the shared region.
293 * That's the "memory object" that needs to be mapped in the process's address
294 * space.
295 * No need to lock since this data is set when the shared region is
296 * created and is never modified after that. The caller must hold an extra
297 * reference on the shared region to prevent it from being destroyed.
298 */
299 ipc_port_t
300 vm_shared_region_mem_entry(
301 vm_shared_region_t shared_region)
302 {
303 SHARED_REGION_TRACE_DEBUG(
304 ("shared_region: -> mem_entry(%p)\n",
305 (void *)VM_KERNEL_ADDRPERM(shared_region)));
306 assert(shared_region->sr_ref_count > 1);
307 SHARED_REGION_TRACE_DEBUG(
308 ("shared_region: mem_entry(%p) <- %p\n",
309 (void *)VM_KERNEL_ADDRPERM(shared_region),
310 (void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry)));
311 return shared_region->sr_mem_entry;
312 }
313
314 vm_map_t
315 vm_shared_region_vm_map(
316 vm_shared_region_t shared_region)
317 {
318 ipc_port_t sr_handle;
319 vm_named_entry_t sr_mem_entry;
320 vm_map_t sr_map;
321
322 SHARED_REGION_TRACE_DEBUG(
323 ("shared_region: -> vm_map(%p)\n",
324 (void *)VM_KERNEL_ADDRPERM(shared_region)));
325 assert(shared_region->sr_ref_count > 1);
326
327 sr_handle = shared_region->sr_mem_entry;
328 sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
329 sr_map = sr_mem_entry->backing.map;
330 assert(sr_mem_entry->is_sub_map);
331
332 SHARED_REGION_TRACE_DEBUG(
333 ("shared_region: vm_map(%p) <- %p\n",
334 (void *)VM_KERNEL_ADDRPERM(shared_region),
335 (void *)VM_KERNEL_ADDRPERM(sr_map)));
336 return sr_map;
337 }
338 uint32_t
339 vm_shared_region_get_slide(
340 vm_shared_region_t shared_region)
341 {
342 SHARED_REGION_TRACE_DEBUG(
343 ("shared_region: -> vm_shared_region_get_slide(%p)\n",
344 (void *)VM_KERNEL_ADDRPERM(shared_region)));
345 assert(shared_region->sr_ref_count > 1);
346 SHARED_REGION_TRACE_DEBUG(
347 ("shared_region: vm_shared_region_get_slide(%p) <- %u\n",
348 (void *)VM_KERNEL_ADDRPERM(shared_region),
349 shared_region->sr_slide_info.slide));
350
351 /* 0 if we haven't slid */
352 assert(shared_region->sr_slide_info.slide_object != NULL ||
353 shared_region->sr_slide_info.slide == 0);
354
355 return shared_region->sr_slide_info.slide;
356 }
357
358 vm_shared_region_slide_info_t
359 vm_shared_region_get_slide_info(
360 vm_shared_region_t shared_region)
361 {
362 SHARED_REGION_TRACE_DEBUG(
363 ("shared_region: -> vm_shared_region_get_slide_info(%p)\n",
364 (void *)VM_KERNEL_ADDRPERM(shared_region)));
365 assert(shared_region->sr_ref_count > 1);
366 SHARED_REGION_TRACE_DEBUG(
367 ("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n",
368 (void *)VM_KERNEL_ADDRPERM(shared_region),
369 (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info)));
370 return &shared_region->sr_slide_info;
371 }
372
373 /*
374 * Set the shared region the process should use.
375 * A NULL new shared region means that we just want to release the old
376 * shared region.
377 * The caller should already have an extra reference on the new shared region
378 * (if any). We release a reference on the old shared region (if any).
379 */
380 void
381 vm_shared_region_set(
382 task_t task,
383 vm_shared_region_t new_shared_region)
384 {
385 vm_shared_region_t old_shared_region;
386
387 SHARED_REGION_TRACE_DEBUG(
388 ("shared_region: -> set(%p, %p)\n",
389 (void *)VM_KERNEL_ADDRPERM(task),
390 (void *)VM_KERNEL_ADDRPERM(new_shared_region)));
391
392 task_lock(task);
393 vm_shared_region_lock();
394
395 old_shared_region = task->shared_region;
396 if (new_shared_region) {
397 assert(new_shared_region->sr_ref_count > 0);
398 }
399
400 task->shared_region = new_shared_region;
401
402 vm_shared_region_unlock();
403 task_unlock(task);
404
405 if (old_shared_region) {
406 assert(old_shared_region->sr_ref_count > 0);
407 vm_shared_region_deallocate(old_shared_region);
408 }
409
410 SHARED_REGION_TRACE_DEBUG(
411 ("shared_region: set(%p) <- old=%p new=%p\n",
412 (void *)VM_KERNEL_ADDRPERM(task),
413 (void *)VM_KERNEL_ADDRPERM(old_shared_region),
414 (void *)VM_KERNEL_ADDRPERM(new_shared_region)));
415 }
416
417 /*
418 * Look up the shared region for the desired environment.
419 * If none is found, create a new (empty) one.
420 * Grab an extra reference on the returned shared region, to make sure
421 * it doesn't get destroyed before the caller is done with it. The caller
422 * is responsible for consuming that extra reference if necessary.
423 */
424 vm_shared_region_t
425 vm_shared_region_lookup(
426 void *root_dir,
427 cpu_type_t cputype,
428 cpu_subtype_t cpu_subtype,
429 boolean_t is_64bit)
430 {
431 vm_shared_region_t shared_region;
432 vm_shared_region_t new_shared_region;
433
434 SHARED_REGION_TRACE_DEBUG(
435 ("shared_region: -> lookup(root=%p,cpu=<%d,%d>,64bit=%d)\n",
436
437 (void *)VM_KERNEL_ADDRPERM(root_dir),
438 cputype, cpu_subtype, is_64bit));
439
440 shared_region = NULL;
441 new_shared_region = NULL;
442
443 vm_shared_region_lock();
444 for (;;) {
445 queue_iterate(&vm_shared_region_queue,
446 shared_region,
447 vm_shared_region_t,
448 sr_q) {
449 assert(shared_region->sr_ref_count > 0);
450 if (shared_region->sr_cpu_type == cputype &&
451 shared_region->sr_cpu_subtype == cpu_subtype &&
452 shared_region->sr_root_dir == root_dir &&
453 shared_region->sr_64bit == is_64bit) {
454 /* found a match ! */
455 vm_shared_region_reference_locked(shared_region);
456 goto done;
457 }
458 }
459 if (new_shared_region == NULL) {
460 /* no match: create a new one */
461 vm_shared_region_unlock();
462 new_shared_region = vm_shared_region_create(root_dir,
463 cputype,
464 cpu_subtype,
465 is_64bit);
466 /* do the lookup again, in case we lost a race */
467 vm_shared_region_lock();
468 continue;
469 }
470 /* still no match: use our new one */
471 shared_region = new_shared_region;
472 new_shared_region = NULL;
473 queue_enter(&vm_shared_region_queue,
474 shared_region,
475 vm_shared_region_t,
476 sr_q);
477 break;
478 }
479
480 done:
481 vm_shared_region_unlock();
482
483 if (new_shared_region) {
484 /*
485 * We lost a race with someone else to create a new shared
486 * region for that environment. Get rid of our unused one.
487 */
488 assert(new_shared_region->sr_ref_count == 1);
489 new_shared_region->sr_ref_count--;
490 vm_shared_region_destroy(new_shared_region);
491 new_shared_region = NULL;
492 }
493
494 SHARED_REGION_TRACE_DEBUG(
495 ("shared_region: lookup(root=%p,cpu=<%d,%d>,64bit=%d) <- %p\n",
496 (void *)VM_KERNEL_ADDRPERM(root_dir),
497 cputype, cpu_subtype, is_64bit,
498 (void *)VM_KERNEL_ADDRPERM(shared_region)));
499
500 assert(shared_region->sr_ref_count > 0);
501 return shared_region;
502 }
503
504 /*
505 * Take an extra reference on a shared region.
506 * The vm_shared_region_lock should already be held by the caller.
507 */
508 static void
509 vm_shared_region_reference_locked(
510 vm_shared_region_t shared_region)
511 {
512 LCK_MTX_ASSERT(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);
513
514 SHARED_REGION_TRACE_DEBUG(
515 ("shared_region: -> reference_locked(%p)\n",
516 (void *)VM_KERNEL_ADDRPERM(shared_region)));
517 assert(shared_region->sr_ref_count > 0);
518 shared_region->sr_ref_count++;
519
520 if (shared_region->sr_timer_call != NULL) {
521 boolean_t cancelled;
522
523 /* cancel and free any pending timeout */
524 cancelled = thread_call_cancel(shared_region->sr_timer_call);
525 if (cancelled) {
526 thread_call_free(shared_region->sr_timer_call);
527 shared_region->sr_timer_call = NULL;
528 /* release the reference held by the cancelled timer */
529 shared_region->sr_ref_count--;
530 } else {
531 /* the timer will drop the reference and free itself */
532 }
533 }
534
535 SHARED_REGION_TRACE_DEBUG(
536 ("shared_region: reference_locked(%p) <- %d\n",
537 (void *)VM_KERNEL_ADDRPERM(shared_region),
538 shared_region->sr_ref_count));
539 }
540
541 /*
542 * Release a reference on the shared region.
543 * Destroy it if there are no references left.
544 */
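/*
 * Note: when the count reaches zero the region is not torn down immediately.
 * A thread_call timer holding its own reference is armed for
 * "shared_region_destroy_delay" seconds, so a quick re-exec of the same
 * environment can simply cancel the timer (see
 * vm_shared_region_reference_locked()) and reuse the region.
 */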
545 void
546 vm_shared_region_deallocate(
547 vm_shared_region_t shared_region)
548 {
549 SHARED_REGION_TRACE_DEBUG(
550 ("shared_region: -> deallocate(%p)\n",
551 (void *)VM_KERNEL_ADDRPERM(shared_region)));
552
553 vm_shared_region_lock();
554
555 assert(shared_region->sr_ref_count > 0);
556
557 if (shared_region->sr_root_dir == NULL) {
558 /*
559 * Local (i.e. based on the boot volume) shared regions
560 * can persist or not based on the "shared_region_persistence"
561 * sysctl.
562 * Make sure that this one complies.
563 *
564 * See comments in vm_shared_region_slide() for notes about
565 * shared regions we have slid (which are not torn down currently).
566 */
567 if (shared_region_persistence &&
568 !shared_region->sr_persists) {
569 /* make this one persistent */
570 shared_region->sr_ref_count++;
571 shared_region->sr_persists = TRUE;
572 } else if (!shared_region_persistence &&
573 shared_region->sr_persists) {
574 /* make this one no longer persistent */
575 assert(shared_region->sr_ref_count > 1);
576 shared_region->sr_ref_count--;
577 shared_region->sr_persists = FALSE;
578 }
579 }
580
581 assert(shared_region->sr_ref_count > 0);
582 shared_region->sr_ref_count--;
583 SHARED_REGION_TRACE_DEBUG(
584 ("shared_region: deallocate(%p): ref now %d\n",
585 (void *)VM_KERNEL_ADDRPERM(shared_region),
586 shared_region->sr_ref_count));
587
588 if (shared_region->sr_ref_count == 0) {
589 uint64_t deadline;
590
591 assert(!shared_region->sr_slid);
592
593 if (shared_region->sr_timer_call == NULL) {
594 /* hold one reference for the timer */
595 assert(!shared_region->sr_mapping_in_progress);
596 shared_region->sr_ref_count++;
597
598 /* set up the timer */
599 shared_region->sr_timer_call = thread_call_allocate(
600 (thread_call_func_t) vm_shared_region_timeout,
601 (thread_call_param_t) shared_region);
602
603 /* schedule the timer */
604 clock_interval_to_deadline(shared_region_destroy_delay,
605 1000 * 1000 * 1000,
606 &deadline);
607 thread_call_enter_delayed(shared_region->sr_timer_call,
608 deadline);
609
610 SHARED_REGION_TRACE_DEBUG(
611 ("shared_region: deallocate(%p): armed timer\n",
612 (void *)VM_KERNEL_ADDRPERM(shared_region)));
613
614 vm_shared_region_unlock();
615 } else {
616 /* timer expired: let go of this shared region */
617
618 /*
619 * We can't properly handle teardown of a slid object today.
620 */
621 assert(!shared_region->sr_slid);
622
623 /*
624 * Remove it from the queue first, so no one can find
625 * it...
626 */
627 queue_remove(&vm_shared_region_queue,
628 shared_region,
629 vm_shared_region_t,
630 sr_q);
631 vm_shared_region_unlock();
632
633 /* ... and destroy it */
634 vm_shared_region_destroy(shared_region);
635 shared_region = NULL;
636 }
637 } else {
638 vm_shared_region_unlock();
639 }
640
641 SHARED_REGION_TRACE_DEBUG(
642 ("shared_region: deallocate(%p) <-\n",
643 (void *)VM_KERNEL_ADDRPERM(shared_region)));
644 }
645
646 void
647 vm_shared_region_timeout(
648 thread_call_param_t param0,
649 __unused thread_call_param_t param1)
650 {
651 vm_shared_region_t shared_region;
652
653 shared_region = (vm_shared_region_t) param0;
654
655 vm_shared_region_deallocate(shared_region);
656 }
657
658 /*
659 * Create a new (empty) shared region for a new environment.
660 */
661 static vm_shared_region_t
662 vm_shared_region_create(
663 void *root_dir,
664 cpu_type_t cputype,
665 cpu_subtype_t cpu_subtype,
666 boolean_t is_64bit)
667 {
668 kern_return_t kr;
669 vm_named_entry_t mem_entry;
670 ipc_port_t mem_entry_port;
671 vm_shared_region_t shared_region;
672 vm_shared_region_slide_info_t si;
673 vm_map_t sub_map;
674 mach_vm_offset_t base_address, pmap_nesting_start;
675 mach_vm_size_t size, pmap_nesting_size;
676
677 SHARED_REGION_TRACE_INFO(
678 ("shared_region: -> create(root=%p,cpu=<%d,%d>,64bit=%d)\n",
679 (void *)VM_KERNEL_ADDRPERM(root_dir),
680 cputype, cpu_subtype, is_64bit));
681
682 base_address = 0;
683 size = 0;
684 mem_entry = NULL;
685 mem_entry_port = IPC_PORT_NULL;
686 sub_map = VM_MAP_NULL;
687
688 /* create a new shared region structure... */
689 shared_region = kalloc(sizeof(*shared_region));
690 if (shared_region == NULL) {
691 SHARED_REGION_TRACE_ERROR(
692 ("shared_region: create: couldn't allocate\n"));
693 goto done;
694 }
695
696 /* figure out the correct settings for the desired environment */
697 if (is_64bit) {
698 switch (cputype) {
699 #if defined(__arm64__)
700 case CPU_TYPE_ARM64:
701 base_address = SHARED_REGION_BASE_ARM64;
702 size = SHARED_REGION_SIZE_ARM64;
703 pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM64;
704 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM64;
705 break;
706 #elif !defined(__arm__)
707 case CPU_TYPE_I386:
708 base_address = SHARED_REGION_BASE_X86_64;
709 size = SHARED_REGION_SIZE_X86_64;
710 pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
711 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
712 break;
713 case CPU_TYPE_POWERPC:
714 base_address = SHARED_REGION_BASE_PPC64;
715 size = SHARED_REGION_SIZE_PPC64;
716 pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
717 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
718 break;
719 #endif
720 default:
721 SHARED_REGION_TRACE_ERROR(
722 ("shared_region: create: unknown cpu type %d\n",
723 cputype));
724 kfree(shared_region, sizeof(*shared_region));
725 shared_region = NULL;
726 goto done;
727 }
728 } else {
729 switch (cputype) {
730 #if defined(__arm__) || defined(__arm64__)
731 case CPU_TYPE_ARM:
732 case CPU_TYPE_ARM64:
733 base_address = SHARED_REGION_BASE_ARM;
734 size = SHARED_REGION_SIZE_ARM;
735 pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
736 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
737 break;
738 #else
739 case CPU_TYPE_I386:
740 base_address = SHARED_REGION_BASE_I386;
741 size = SHARED_REGION_SIZE_I386;
742 pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
743 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
744 break;
745 case CPU_TYPE_POWERPC:
746 base_address = SHARED_REGION_BASE_PPC;
747 size = SHARED_REGION_SIZE_PPC;
748 pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
749 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
750 break;
751 #endif
752 default:
753 SHARED_REGION_TRACE_ERROR(
754 ("shared_region: create: unknown cpu type %d\n",
755 cputype));
756 kfree(shared_region, sizeof(*shared_region));
757 shared_region = NULL;
758 goto done;
759 }
760 }
761
762 /* create a memory entry structure and a Mach port handle */
763 kr = mach_memory_entry_allocate(&mem_entry,
764 &mem_entry_port);
765 if (kr != KERN_SUCCESS) {
766 kfree(shared_region, sizeof(*shared_region));
767 shared_region = NULL;
768 SHARED_REGION_TRACE_ERROR(
769 ("shared_region: create: "
770 "couldn't allocate mem_entry\n"));
771 goto done;
772 }
773
774 #if defined(__arm__) || defined(__arm64__)
775 {
776 struct pmap *pmap_nested;
777
778 pmap_nested = pmap_create(NULL, 0, is_64bit);
779 if (pmap_nested != PMAP_NULL) {
780 pmap_set_nested(pmap_nested);
781 sub_map = vm_map_create(pmap_nested, 0, size, TRUE);
782 #if defined(__arm64__)
783 if (is_64bit ||
784 page_shift_user32 == SIXTEENK_PAGE_SHIFT) {
785 /* enforce 16KB alignment of VM map entries */
786 vm_map_set_page_shift(sub_map,
787 SIXTEENK_PAGE_SHIFT);
788 }
789 #elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
790 /* enforce 16KB alignment for watch targets with new ABI */
791 vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT);
792 #endif /* __arm64__ */
793 } else {
794 sub_map = VM_MAP_NULL;
795 }
796 }
797 #else
798 /* create a VM sub map and its pmap */
799 sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit),
800 0, size,
801 TRUE);
802 #endif
803 if (sub_map == VM_MAP_NULL) {
804 ipc_port_release_send(mem_entry_port);
805 kfree(shared_region, sizeof(*shared_region));
806 shared_region = NULL;
807 SHARED_REGION_TRACE_ERROR(
808 ("shared_region: create: "
809 "couldn't allocate map\n"));
810 goto done;
811 }
812
813 assert(!sub_map->disable_vmentry_reuse);
814 sub_map->is_nested_map = TRUE;
815
816 /* make the memory entry point to the VM sub map */
817 mem_entry->is_sub_map = TRUE;
818 mem_entry->backing.map = sub_map;
819 mem_entry->size = size;
820 mem_entry->protection = VM_PROT_ALL;
821
822 /* make the shared region point at the memory entry */
823 shared_region->sr_mem_entry = mem_entry_port;
824
825 /* fill in the shared region's environment and settings */
826 shared_region->sr_base_address = base_address;
827 shared_region->sr_size = size;
828 shared_region->sr_pmap_nesting_start = pmap_nesting_start;
829 shared_region->sr_pmap_nesting_size = pmap_nesting_size;
830 shared_region->sr_cpu_type = cputype;
831 shared_region->sr_cpu_subtype = cpu_subtype;
832 shared_region->sr_64bit = is_64bit;
833 shared_region->sr_root_dir = root_dir;
834
835 queue_init(&shared_region->sr_q);
836 shared_region->sr_mapping_in_progress = FALSE;
837 shared_region->sr_slide_in_progress = FALSE;
838 shared_region->sr_persists = FALSE;
839 shared_region->sr_slid = FALSE;
840 shared_region->sr_timer_call = NULL;
841 shared_region->sr_first_mapping = (mach_vm_offset_t) -1;
842
843 /* grab a reference for the caller */
844 shared_region->sr_ref_count = 1;
845
846 /* And set up slide info */
847 si = &shared_region->sr_slide_info;
848 si->start = 0;
849 si->end = 0;
850 si->slide = 0;
851 si->slide_object = NULL;
852 si->slide_info_size = 0;
853 si->slide_info_entry = NULL;
854
855 /* Initialize UUID and other metadata */
856 memset(&shared_region->sr_uuid, '\0', sizeof(shared_region->sr_uuid));
857 shared_region->sr_uuid_copied = FALSE;
858 shared_region->sr_images_count = 0;
859 shared_region->sr_images = NULL;
860 done:
861 if (shared_region) {
862 SHARED_REGION_TRACE_INFO(
863 ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,"
864 "base=0x%llx,size=0x%llx) <- "
865 "%p mem=(%p,%p) map=%p pmap=%p\n",
866 (void *)VM_KERNEL_ADDRPERM(root_dir),
867 cputype, cpu_subtype, is_64bit,
868 (long long)base_address,
869 (long long)size,
870 (void *)VM_KERNEL_ADDRPERM(shared_region),
871 (void *)VM_KERNEL_ADDRPERM(mem_entry_port),
872 (void *)VM_KERNEL_ADDRPERM(mem_entry),
873 (void *)VM_KERNEL_ADDRPERM(sub_map),
874 (void *)VM_KERNEL_ADDRPERM(sub_map->pmap)));
875 } else {
876 SHARED_REGION_TRACE_INFO(
877 ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,"
878 "base=0x%llx,size=0x%llx) <- NULL",
879 (void *)VM_KERNEL_ADDRPERM(root_dir),
880 cputype, cpu_subtype, is_64bit,
881 (long long)base_address,
882 (long long)size));
883 }
884 return shared_region;
885 }
886
887 /*
888 * Destroy a now-unused shared region.
889 * The shared region is no longer in the queue and can not be looked up.
890 */
891 static void
892 vm_shared_region_destroy(
893 vm_shared_region_t shared_region)
894 {
895 vm_named_entry_t mem_entry;
896 vm_map_t map;
897
898 SHARED_REGION_TRACE_INFO(
899 ("shared_region: -> destroy(%p) (root=%p,cpu=<%d,%d>,64bit=%d)\n",
900 (void *)VM_KERNEL_ADDRPERM(shared_region),
901 (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir),
902 shared_region->sr_cpu_type,
903 shared_region->sr_cpu_subtype,
904 shared_region->sr_64bit));
905
906 assert(shared_region->sr_ref_count == 0);
907 assert(!shared_region->sr_persists);
908 assert(!shared_region->sr_slid);
909
910 mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
911 assert(mem_entry->is_sub_map);
912 assert(!mem_entry->internal);
913 assert(!mem_entry->is_copy);
914 map = mem_entry->backing.map;
915
916 /*
917 * Clean up the pmap first. The virtual addresses that were
918 * entered in this possibly "nested" pmap may have different values
919 * than the VM map's min and max offsets, if the VM sub map was
920 * mapped at a non-zero offset in the processes' main VM maps, which
921 * is usually the case, so the clean-up we do in vm_map_destroy() would
922 * not be enough.
923 */
924 if (map->pmap) {
925 pmap_remove(map->pmap,
926 shared_region->sr_base_address,
927 (shared_region->sr_base_address +
928 shared_region->sr_size));
929 }
930
931 /*
932 * Release our (one and only) handle on the memory entry.
933 * This will generate a no-senders notification, which will be processed
934 * by ipc_kobject_notify(), which will release the one and only
935 * reference on the memory entry and cause it to be destroyed, along
936 * with the VM sub map and its pmap.
937 */
938 mach_memory_entry_port_release(shared_region->sr_mem_entry);
939 mem_entry = NULL;
940 shared_region->sr_mem_entry = IPC_PORT_NULL;
941
942 if (shared_region->sr_timer_call) {
943 thread_call_free(shared_region->sr_timer_call);
944 }
945
946 #if 0
947 /*
948 * If slid, free those resources. We'll want this eventually,
949 * but can't handle it properly today.
950 */
951 si = &shared_region->sr_slide_info;
952 if (si->slide_info_entry) {
953 kmem_free(kernel_map,
954 (vm_offset_t) si->slide_info_entry,
955 (vm_size_t) si->slide_info_size);
956 vm_object_deallocate(si->slide_object);
957 }
958 #endif
959
960 /* release the shared region structure... */
961 kfree(shared_region, sizeof(*shared_region));
962
963 SHARED_REGION_TRACE_DEBUG(
964 ("shared_region: destroy(%p) <-\n",
965 (void *)VM_KERNEL_ADDRPERM(shared_region)));
966 shared_region = NULL;
967 }
968
969 /*
970 * Gets the address of the first (in time) mapping in the shared region.
971 */
972 kern_return_t
973 vm_shared_region_start_address(
974 vm_shared_region_t shared_region,
975 mach_vm_offset_t *start_address)
976 {
977 kern_return_t kr;
978 mach_vm_offset_t sr_base_address;
979 mach_vm_offset_t sr_first_mapping;
980
981 SHARED_REGION_TRACE_DEBUG(
982 ("shared_region: -> start_address(%p)\n",
983 (void *)VM_KERNEL_ADDRPERM(shared_region)));
984 assert(shared_region->sr_ref_count > 1);
985
986 vm_shared_region_lock();
987
988 /*
989 * Wait if there's another thread establishing a mapping
990 * in this shared region right when we're looking at it.
991 * We want a consistent view of the map...
992 */
993 while (shared_region->sr_mapping_in_progress) {
994 /* wait for our turn... */
995 assert(shared_region->sr_ref_count > 1);
996 vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
997 THREAD_UNINT);
998 }
999 assert(!shared_region->sr_mapping_in_progress);
1000 assert(shared_region->sr_ref_count > 1);
1001
1002 sr_base_address = shared_region->sr_base_address;
1003 sr_first_mapping = shared_region->sr_first_mapping;
1004
1005 if (sr_first_mapping == (mach_vm_offset_t) -1) {
1006 /* shared region is empty */
1007 kr = KERN_INVALID_ADDRESS;
1008 } else {
1009 kr = KERN_SUCCESS;
1010 *start_address = sr_base_address + sr_first_mapping;
1011 }
1012
1013 vm_shared_region_unlock();
1014
1015 SHARED_REGION_TRACE_DEBUG(
1016 ("shared_region: start_address(%p) <- 0x%llx\n",
1017 (void *)VM_KERNEL_ADDRPERM(shared_region),
1018 (long long)shared_region->sr_base_address));
1019
1020 return kr;
1021 }
1022
1023 void
1024 vm_shared_region_undo_mappings(
1025 vm_map_t sr_map,
1026 mach_vm_offset_t sr_base_address,
1027 struct shared_file_mapping_np *mappings,
1028 unsigned int mappings_count)
1029 {
1030 unsigned int j = 0;
1031 vm_shared_region_t shared_region = NULL;
1032 boolean_t reset_shared_region_state = FALSE;
1033
1034 shared_region = vm_shared_region_get(current_task());
1035 if (shared_region == NULL) {
1036 printf("Failed to undo mappings because of NULL shared region.\n");
1037 return;
1038 }
1039
1040
1041 if (sr_map == NULL) {
1042 ipc_port_t sr_handle;
1043 vm_named_entry_t sr_mem_entry;
1044
1045 vm_shared_region_lock();
1046 assert(shared_region->sr_ref_count > 1);
1047
1048 while (shared_region->sr_mapping_in_progress) {
1049 /* wait for our turn... */
1050 vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
1051 THREAD_UNINT);
1052 }
1053 assert(!shared_region->sr_mapping_in_progress);
1054 assert(shared_region->sr_ref_count > 1);
1055 /* let others know we're working in this shared region */
1056 shared_region->sr_mapping_in_progress = TRUE;
1057
1058 vm_shared_region_unlock();
1059
1060 reset_shared_region_state = TRUE;
1061
1062 /* no need to lock because this data is never modified... */
1063 sr_handle = shared_region->sr_mem_entry;
1064 sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
1065 sr_map = sr_mem_entry->backing.map;
1066 sr_base_address = shared_region->sr_base_address;
1067 }
1068 /*
1069 * Undo the mappings we've established so far.
1070 */
1071 for (j = 0; j < mappings_count; j++) {
1072 kern_return_t kr2;
1073
1074 if (mappings[j].sfm_size == 0) {
1075 /*
1076 * We didn't establish this
1077 * mapping, so nothing to undo.
1078 */
1079 continue;
1080 }
1081 SHARED_REGION_TRACE_INFO(
1082 ("shared_region: mapping[%d]: "
1083 "address:0x%016llx "
1084 "size:0x%016llx "
1085 "offset:0x%016llx "
1086 "maxprot:0x%x prot:0x%x: "
1087 "undoing...\n",
1088 j,
1089 (long long)mappings[j].sfm_address,
1090 (long long)mappings[j].sfm_size,
1091 (long long)mappings[j].sfm_file_offset,
1092 mappings[j].sfm_max_prot,
1093 mappings[j].sfm_init_prot));
1094 kr2 = mach_vm_deallocate(
1095 sr_map,
1096 (mappings[j].sfm_address -
1097 sr_base_address),
1098 mappings[j].sfm_size);
1099 assert(kr2 == KERN_SUCCESS);
1100 }
1101
1102 if (reset_shared_region_state) {
1103 vm_shared_region_lock();
1104 assert(shared_region->sr_ref_count > 1);
1105 assert(shared_region->sr_mapping_in_progress);
1106 /* we're done working on that shared region */
1107 shared_region->sr_mapping_in_progress = FALSE;
1108 thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
1109 vm_shared_region_unlock();
1110 reset_shared_region_state = FALSE;
1111 }
1112
1113 vm_shared_region_deallocate(shared_region);
1114 }
1115
1116 /*
1117 * Establish some mappings of a file in the shared region.
1118 * This is used by "dyld" via the shared_region_map_np() system call
1119 * to populate the shared region with the appropriate shared cache.
1120 *
1121 * One could also call it several times to incrementally load several
1122 * libraries, as long as they do not overlap.
1123 * It will return KERN_SUCCESS if the mappings were successfully established
1124 * or if they were already established identically by another process.
1125 */
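/*
 * Illustrative sketch of the caller's side (not part of this file): dyld
 * describes each region of the dyld_shared_cache file with a
 * "struct shared_file_mapping_np" and passes the whole array down in a
 * single call.  The userspace stub name and its exact argument list below
 * are assumptions; the kernel entry points live in bsd/vm/vm_unix.c.
 *
 *	struct shared_file_mapping_np mappings[3] = {
 *		// sfm_address, sfm_size, sfm_file_offset, sfm_max_prot, sfm_init_prot
 *		{ text_addr, text_size, text_offset,
 *		  VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_READ | VM_PROT_EXECUTE },
 *		{ data_addr, data_size, data_offset,
 *		  VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE },
 *		{ ro_addr, ro_size, ro_offset, VM_PROT_READ, VM_PROT_READ },
 *	};
 *	int err = __shared_region_map_and_slide_np(cache_fd, 3, mappings,
 *	    slide, slide_info, slide_info_size);
 */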
1126 kern_return_t
1127 vm_shared_region_map_file(
1128 vm_shared_region_t shared_region,
1129 unsigned int mappings_count,
1130 struct shared_file_mapping_np *mappings,
1131 memory_object_control_t file_control,
1132 memory_object_size_t file_size,
1133 void *root_dir,
1134 uint32_t slide,
1135 user_addr_t slide_start,
1136 user_addr_t slide_size)
1137 {
1138 kern_return_t kr;
1139 vm_object_t file_object;
1140 ipc_port_t sr_handle;
1141 vm_named_entry_t sr_mem_entry;
1142 vm_map_t sr_map;
1143 mach_vm_offset_t sr_base_address;
1144 unsigned int i;
1145 mach_port_t map_port;
1146 vm_map_offset_t target_address;
1147 vm_object_t object;
1148 vm_object_size_t obj_size;
1149 struct shared_file_mapping_np *mapping_to_slide = NULL;
1150 mach_vm_offset_t first_mapping = (mach_vm_offset_t) -1;
1151 mach_vm_offset_t slid_mapping = (mach_vm_offset_t) -1;
1152 vm_map_offset_t lowest_unnestable_addr = 0;
1153 vm_map_kernel_flags_t vmk_flags;
1154 mach_vm_offset_t sfm_min_address = ~0;
1155 mach_vm_offset_t sfm_max_address = 0;
1156 struct _dyld_cache_header sr_cache_header;
1157
1158 #if __arm64__
1159 if ((shared_region->sr_64bit ||
1160 page_shift_user32 == SIXTEENK_PAGE_SHIFT) &&
1161 ((slide & SIXTEENK_PAGE_MASK) != 0)) {
1162 printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n",
1163 __FUNCTION__, slide);
1164 kr = KERN_INVALID_ARGUMENT;
1165 goto done;
1166 }
1167 #endif /* __arm64__ */
1168
1169 kr = KERN_SUCCESS;
1170
1171 vm_shared_region_lock();
1172 assert(shared_region->sr_ref_count > 1);
1173
1174 if (shared_region->sr_root_dir != root_dir) {
1175 /*
1176 * This shared region doesn't match the current root
1177 * directory of this process. Deny the mapping to
1178 * avoid tainting the shared region with something that
1179 * doesn't quite belong into it.
1180 */
1181 vm_shared_region_unlock();
1182 kr = KERN_PROTECTION_FAILURE;
1183 goto done;
1184 }
1185
1186 /*
1187 * Make sure we handle only one mapping at a time in a given
1188 * shared region, to avoid race conditions. This should not
1189 * happen frequently...
1190 */
1191 while (shared_region->sr_mapping_in_progress) {
1192 /* wait for our turn... */
1193 vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
1194 THREAD_UNINT);
1195 }
1196 assert(!shared_region->sr_mapping_in_progress);
1197 assert(shared_region->sr_ref_count > 1);
1198 /* let others know we're working in this shared region */
1199 shared_region->sr_mapping_in_progress = TRUE;
1200
1201 vm_shared_region_unlock();
1202
1203 /* no need to lock because this data is never modified... */
1204 sr_handle = shared_region->sr_mem_entry;
1205 sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
1206 sr_map = sr_mem_entry->backing.map;
1207 sr_base_address = shared_region->sr_base_address;
1208
1209 SHARED_REGION_TRACE_DEBUG(
1210 ("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
1211 (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
1212 (void *)VM_KERNEL_ADDRPERM(mappings),
1213 (void *)VM_KERNEL_ADDRPERM(file_control), file_size));
1214
1215 /* get the VM object associated with the file to be mapped */
1216 file_object = memory_object_control_to_vm_object(file_control);
1217
1218 assert(file_object);
1219
1220 /* establish the mappings */
1221 for (i = 0; i < mappings_count; i++) {
1222 SHARED_REGION_TRACE_INFO(
1223 ("shared_region: mapping[%d]: "
1224 "address:0x%016llx size:0x%016llx offset:0x%016llx "
1225 "maxprot:0x%x prot:0x%x\n",
1226 i,
1227 (long long)mappings[i].sfm_address,
1228 (long long)mappings[i].sfm_size,
1229 (long long)mappings[i].sfm_file_offset,
1230 mappings[i].sfm_max_prot,
1231 mappings[i].sfm_init_prot));
1232
1233 if (mappings[i].sfm_address < sfm_min_address) {
1234 sfm_min_address = mappings[i].sfm_address;
1235 }
1236
1237 if ((mappings[i].sfm_address + mappings[i].sfm_size) > sfm_max_address) {
1238 sfm_max_address = mappings[i].sfm_address + mappings[i].sfm_size;
1239 }
1240
1241 if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
1242 /* zero-filled memory */
1243 map_port = MACH_PORT_NULL;
1244 } else {
1245 /* file-backed memory */
1246 __IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager);
1247 }
1248
1249 if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) {
1250 /*
1251 * This is the mapping that needs to be slid.
1252 */
1253 if (mapping_to_slide != NULL) {
1254 SHARED_REGION_TRACE_INFO(
1255 ("shared_region: mapping[%d]: "
1256 "address:0x%016llx size:0x%016llx "
1257 "offset:0x%016llx "
1258 "maxprot:0x%x prot:0x%x "
1259 "will not be slid as only one such mapping is allowed...\n",
1260 i,
1261 (long long)mappings[i].sfm_address,
1262 (long long)mappings[i].sfm_size,
1263 (long long)mappings[i].sfm_file_offset,
1264 mappings[i].sfm_max_prot,
1265 mappings[i].sfm_init_prot));
1266 } else {
1267 mapping_to_slide = &mappings[i];
1268 }
1269 }
1270
1271 /* mapping's address is relative to the shared region base */
1272 target_address =
1273 mappings[i].sfm_address - sr_base_address;
1274
1275 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1276 vmk_flags.vmkf_already = TRUE;
1277
1278 /* establish that mapping, OK if it's "already" there */
1279 if (map_port == MACH_PORT_NULL) {
1280 /*
1281 * We want to map some anonymous memory in a
1282 * shared region.
1283 * We have to create the VM object now, so that it
1284 * can be mapped "copy-on-write".
1285 */
1286 obj_size = vm_map_round_page(mappings[i].sfm_size,
1287 VM_MAP_PAGE_MASK(sr_map));
1288 object = vm_object_allocate(obj_size);
1289 if (object == VM_OBJECT_NULL) {
1290 kr = KERN_RESOURCE_SHORTAGE;
1291 } else {
1292 kr = vm_map_enter(
1293 sr_map,
1294 &target_address,
1295 vm_map_round_page(mappings[i].sfm_size,
1296 VM_MAP_PAGE_MASK(sr_map)),
1297 0,
1298 VM_FLAGS_FIXED,
1299 vmk_flags,
1300 VM_KERN_MEMORY_NONE,
1301 object,
1302 0,
1303 TRUE,
1304 mappings[i].sfm_init_prot & VM_PROT_ALL,
1305 mappings[i].sfm_max_prot & VM_PROT_ALL,
1306 VM_INHERIT_DEFAULT);
1307 }
1308 } else {
1309 object = VM_OBJECT_NULL; /* no anonymous memory here */
1310 kr = vm_map_enter_mem_object(
1311 sr_map,
1312 &target_address,
1313 vm_map_round_page(mappings[i].sfm_size,
1314 VM_MAP_PAGE_MASK(sr_map)),
1315 0,
1316 VM_FLAGS_FIXED,
1317 vmk_flags,
1318 VM_KERN_MEMORY_NONE,
1319 map_port,
1320 mappings[i].sfm_file_offset,
1321 TRUE,
1322 mappings[i].sfm_init_prot & VM_PROT_ALL,
1323 mappings[i].sfm_max_prot & VM_PROT_ALL,
1324 VM_INHERIT_DEFAULT);
1325 }
1326
1327 if (kr == KERN_SUCCESS) {
1328 /*
1329 * Record the first (chronologically) successful
1330 * mapping in this shared region.
1331 * We're protected by "sr_mapping_in_progress" here,
1332 * so no need to lock "shared_region".
1333 */
1334 if (first_mapping == (mach_vm_offset_t) -1) {
1335 first_mapping = target_address;
1336 }
1337
1338 if ((slid_mapping == (mach_vm_offset_t) -1) &&
1339 (mapping_to_slide == &mappings[i])) {
1340 slid_mapping = target_address;
1341 }
1342
1343 /*
1344 * Record the lowest writable address in this
1345 * sub map, to log any unexpected unnesting below
1346 * that address (see log_unnest_badness()).
1347 */
1348 if ((mappings[i].sfm_init_prot & VM_PROT_WRITE) &&
1349 sr_map->is_nested_map &&
1350 (lowest_unnestable_addr == 0 ||
1351 (target_address < lowest_unnestable_addr))) {
1352 lowest_unnestable_addr = target_address;
1353 }
1354 } else {
1355 if (map_port == MACH_PORT_NULL) {
1356 /*
1357 * Get rid of the VM object we just created
1358 * but failed to map.
1359 */
1360 vm_object_deallocate(object);
1361 object = VM_OBJECT_NULL;
1362 }
1363 if (kr == KERN_MEMORY_PRESENT) {
1364 /*
1365 * This exact mapping was already there:
1366 * that's fine.
1367 */
1368 SHARED_REGION_TRACE_INFO(
1369 ("shared_region: mapping[%d]: "
1370 "address:0x%016llx size:0x%016llx "
1371 "offset:0x%016llx "
1372 "maxprot:0x%x prot:0x%x "
1373 "already mapped...\n",
1374 i,
1375 (long long)mappings[i].sfm_address,
1376 (long long)mappings[i].sfm_size,
1377 (long long)mappings[i].sfm_file_offset,
1378 mappings[i].sfm_max_prot,
1379 mappings[i].sfm_init_prot));
1380 /*
1381 * We didn't establish this mapping ourselves;
1382 * let's reset its size, so that we do not
1383 * attempt to undo it if an error occurs later.
1384 */
1385 mappings[i].sfm_size = 0;
1386 kr = KERN_SUCCESS;
1387 } else {
1388 /* this mapping failed ! */
1389 SHARED_REGION_TRACE_ERROR(
1390 ("shared_region: mapping[%d]: "
1391 "address:0x%016llx size:0x%016llx "
1392 "offset:0x%016llx "
1393 "maxprot:0x%x prot:0x%x failed 0x%x\n",
1394 i,
1395 (long long)mappings[i].sfm_address,
1396 (long long)mappings[i].sfm_size,
1397 (long long)mappings[i].sfm_file_offset,
1398 mappings[i].sfm_max_prot,
1399 mappings[i].sfm_init_prot,
1400 kr));
1401
1402 vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i);
1403 break;
1404 }
1405 }
1406 }
1407
1408 if (kr == KERN_SUCCESS &&
1409 slide_size != 0 &&
1410 mapping_to_slide != NULL) {
1411 kr = vm_shared_region_slide(slide,
1412 mapping_to_slide->sfm_file_offset,
1413 mapping_to_slide->sfm_size,
1414 slide_start,
1415 slide_size,
1416 slid_mapping,
1417 file_control);
1418 if (kr != KERN_SUCCESS) {
1419 SHARED_REGION_TRACE_ERROR(
1420 ("shared_region: region_slide("
1421 "slide:0x%x start:0x%016llx "
1422 "size:0x%016llx) failed 0x%x\n",
1423 slide,
1424 (long long)slide_start,
1425 (long long)slide_size,
1426 kr));
1427 vm_shared_region_undo_mappings(sr_map,
1428 sr_base_address,
1429 mappings,
1430 mappings_count);
1431 }
1432 }
1433
1434 if (kr == KERN_SUCCESS) {
1435 /* adjust the map's "lowest_unnestable_start" */
1436 lowest_unnestable_addr &= ~(pmap_nesting_size_min - 1);
1437 if (lowest_unnestable_addr !=
1438 sr_map->lowest_unnestable_start) {
1439 vm_map_lock(sr_map);
1440 sr_map->lowest_unnestable_start =
1441 lowest_unnestable_addr;
1442 vm_map_unlock(sr_map);
1443 }
1444 }
1445
1446 vm_shared_region_lock();
1447 assert(shared_region->sr_ref_count > 1);
1448 assert(shared_region->sr_mapping_in_progress);
1449
1450 /* set "sr_first_mapping"; dyld uses it to validate the shared cache */
1451 if (kr == KERN_SUCCESS &&
1452 shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
1453 shared_region->sr_first_mapping = first_mapping;
1454 }
1455
1456 /*
1457 * copy in the shared region UUID to the shared region structure.
1458 * we do this indirectly by first copying in the shared cache header
1459 * and then copying the UUID from there because we'll need to look
1460 * at other content from the shared cache header.
1461 */
1462 if (kr == KERN_SUCCESS && !shared_region->sr_uuid_copied) {
1463 int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping),
1464 (char *)&sr_cache_header,
1465 sizeof(sr_cache_header));
1466 if (error == 0) {
1467 memcpy(&shared_region->sr_uuid, &sr_cache_header.uuid, sizeof(shared_region->sr_uuid));
1468 shared_region->sr_uuid_copied = TRUE;
1469 } else {
1470 #if DEVELOPMENT || DEBUG
1471 panic("shared_region: copyin shared_cache_header(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
1472 "offset:0 size:0x%016llx) failed with %d\n",
1473 (long long)shared_region->sr_base_address,
1474 (long long)shared_region->sr_first_mapping,
1475 (long long)sizeof(sr_cache_header),
1476 error);
1477 #endif /* DEVELOPMENT || DEBUG */
1478 shared_region->sr_uuid_copied = FALSE;
1479 }
1480 }
1481
1482 /*
1483 * If the shared cache is associated with the init task (and is therefore the system shared cache),
1484 * check whether it is a custom built shared cache and copy in the shared cache layout accordingly.
1485 */
1486 boolean_t is_init_task = (task_pid(current_task()) == 1);
1487 if (shared_region->sr_uuid_copied && is_init_task) {
1488 /* Copy in the shared cache layout if we're running with a locally built shared cache */
1489 if (sr_cache_header.locallyBuiltCache) {
1490 KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_START);
1491 size_t image_array_length = (sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info));
1492 struct _dyld_cache_image_text_info *sr_image_layout = kalloc(image_array_length);
1493 int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping +
1494 sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length);
1495 if (error == 0) {
1496 shared_region->sr_images = kalloc(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64));
1497 for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) {
1498 memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid,
1499 sizeof(shared_region->sr_images[index].imageUUID));
1500 shared_region->sr_images[index].imageLoadAddress = sr_image_layout[index].loadAddress;
1501 }
1502
1503 assert(sr_cache_header.imagesTextCount < UINT32_MAX);
1504 shared_region->sr_images_count = (uint32_t) sr_cache_header.imagesTextCount;
1505 } else {
1506 #if DEVELOPMENT || DEBUG
1507 panic("shared_region: copyin shared_cache_layout(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
1508 "offset:0x%016llx size:0x%016llx) failed with %d\n",
1509 (long long)shared_region->sr_base_address,
1510 (long long)shared_region->sr_first_mapping,
1511 (long long)sr_cache_header.imagesTextOffset,
1512 (long long)image_array_length,
1513 error);
1514 #endif /* DEVELOPMENT || DEBUG */
1515 }
1516 KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count);
1517 kfree(sr_image_layout, image_array_length);
1518 sr_image_layout = NULL;
1519 }
1520 init_task_shared_region = shared_region;
1521 }
1522
1523 if (kr == KERN_SUCCESS) {
1524 /*
1525 * If we succeeded, we know the bounds of the shared region.
1526 * Trim our pmaps to only cover this range (if applicable to
1527 * this platform).
1528 */
1529 pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_min_address, sfm_max_address - sfm_min_address);
1530 }
1531
1532 /* we're done working on that shared region */
1533 shared_region->sr_mapping_in_progress = FALSE;
1534 thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
1535 vm_shared_region_unlock();
1536
1537 done:
1538 SHARED_REGION_TRACE_DEBUG(
1539 ("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n",
1540 (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
1541 (void *)VM_KERNEL_ADDRPERM(mappings),
1542 (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr));
1543 return kr;
1544 }
1545
1546 /*
1547 * Retrieve a task's shared region and grab an extra reference to
1548 * make sure it doesn't disappear while the caller is using it.
1549 * The caller is responsible for consuming that extra reference if
1550 * necessary.
1551 *
1552 * This also tries to trim the pmap for the shared region.
1553 */
1554 vm_shared_region_t
1555 vm_shared_region_trim_and_get(task_t task)
1556 {
1557 vm_shared_region_t shared_region;
1558 ipc_port_t sr_handle;
1559 vm_named_entry_t sr_mem_entry;
1560 vm_map_t sr_map;
1561
1562 /* Get the shared region and the map. */
1563 shared_region = vm_shared_region_get(task);
1564 if (shared_region == NULL) {
1565 return NULL;
1566 }
1567
1568 sr_handle = shared_region->sr_mem_entry;
1569 sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
1570 sr_map = sr_mem_entry->backing.map;
1571
1572 /* Trim the pmap if possible. */
1573 pmap_trim(task->map->pmap, sr_map->pmap, 0, 0, 0);
1574
1575 return shared_region;
1576 }
1577
1578 /*
1579 * Enter the appropriate shared region into "map" for "task".
1580 * This involves looking up the shared region (and possibly creating a new
1581 * one) for the desired environment, then mapping the VM sub map into the
1582 * task's VM "map", with the appropriate level of pmap-nesting.
1583 */
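/*
 * The mapping below is done in up to three pieces: an un-nested prefix
 * (anything below sr_pmap_nesting_start), one or more pmap-nested chunks of
 * at most pmap_nesting_size_max bytes each, and an un-nested remainder.
 * Only the nested pieces use the VM_MEMORY_SHARED_PMAP alias, which is what
 * triggers pmap nesting in vm_map_enter_mem_object().
 */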
1584 kern_return_t
1585 vm_shared_region_enter(
1586 struct _vm_map *map,
1587 struct task *task,
1588 boolean_t is_64bit,
1589 void *fsroot,
1590 cpu_type_t cpu,
1591 cpu_subtype_t cpu_subtype)
1592 {
1593 kern_return_t kr;
1594 vm_shared_region_t shared_region;
1595 vm_map_offset_t sr_address, sr_offset, target_address;
1596 vm_map_size_t sr_size, mapping_size;
1597 vm_map_offset_t sr_pmap_nesting_start;
1598 vm_map_size_t sr_pmap_nesting_size;
1599 ipc_port_t sr_handle;
1600 vm_prot_t cur_prot, max_prot;
1601
1602 SHARED_REGION_TRACE_DEBUG(
1603 ("shared_region: -> "
1604 "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d)\n",
1605 (void *)VM_KERNEL_ADDRPERM(map),
1606 (void *)VM_KERNEL_ADDRPERM(task),
1607 (void *)VM_KERNEL_ADDRPERM(fsroot),
1608 cpu, cpu_subtype, is_64bit));
1609
1610 /* lookup (create if needed) the shared region for this environment */
1611 shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit);
1612 if (shared_region == NULL) {
1613 /* this should not happen ! */
1614 SHARED_REGION_TRACE_ERROR(
1615 ("shared_region: -> "
1616 "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d): "
1617 "lookup failed !\n",
1618 (void *)VM_KERNEL_ADDRPERM(map),
1619 (void *)VM_KERNEL_ADDRPERM(task),
1620 (void *)VM_KERNEL_ADDRPERM(fsroot),
1621 cpu, cpu_subtype, is_64bit));
1622 //panic("shared_region_enter: lookup failed\n");
1623 return KERN_FAILURE;
1624 }
1625
1626 kr = KERN_SUCCESS;
1627 /* no need to lock since this data is never modified */
1628 sr_address = shared_region->sr_base_address;
1629 sr_size = shared_region->sr_size;
1630 sr_handle = shared_region->sr_mem_entry;
1631 sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start;
1632 sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size;
1633
1634 cur_prot = VM_PROT_READ;
1635 #if __x86_64__
1636 /*
1637 * XXX BINARY COMPATIBILITY
1638 * java6 apparently needs to modify some code in the
1639 * dyld shared cache and needs to be allowed to add
1640 * write access...
1641 */
1642 max_prot = VM_PROT_ALL;
1643 #else /* __x86_64__ */
1644 max_prot = VM_PROT_READ;
1645 #endif /* __x86_64__ */
1646 /*
1647 * Start mapping the shared region's VM sub map into the task's VM map.
1648 */
1649 sr_offset = 0;
1650
1651 if (sr_pmap_nesting_start > sr_address) {
1652 /* we need to map a range without pmap-nesting first */
1653 target_address = sr_address;
1654 mapping_size = sr_pmap_nesting_start - sr_address;
1655 kr = vm_map_enter_mem_object(
1656 map,
1657 &target_address,
1658 mapping_size,
1659 0,
1660 VM_FLAGS_FIXED,
1661 VM_MAP_KERNEL_FLAGS_NONE,
1662 VM_KERN_MEMORY_NONE,
1663 sr_handle,
1664 sr_offset,
1665 TRUE,
1666 cur_prot,
1667 max_prot,
1668 VM_INHERIT_SHARE);
1669 if (kr != KERN_SUCCESS) {
1670 SHARED_REGION_TRACE_ERROR(
1671 ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
1672 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1673 (void *)VM_KERNEL_ADDRPERM(map),
1674 (void *)VM_KERNEL_ADDRPERM(task),
1675 (void *)VM_KERNEL_ADDRPERM(fsroot),
1676 cpu, cpu_subtype, is_64bit,
1677 (long long)target_address,
1678 (long long)mapping_size,
1679 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
1680 goto done;
1681 }
1682 SHARED_REGION_TRACE_DEBUG(
1683 ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
1684 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1685 (void *)VM_KERNEL_ADDRPERM(map),
1686 (void *)VM_KERNEL_ADDRPERM(task),
1687 (void *)VM_KERNEL_ADDRPERM(fsroot),
1688 cpu, cpu_subtype, is_64bit,
1689 (long long)target_address, (long long)mapping_size,
1690 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
1691 sr_offset += mapping_size;
1692 sr_size -= mapping_size;
1693 }
1694 /*
1695 * We may need to map several pmap-nested portions, due to platform
1696 * specific restrictions on pmap nesting.
1697 * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias...
1698 */
1699 for (;
1700 sr_pmap_nesting_size > 0;
1701 sr_offset += mapping_size,
1702 sr_size -= mapping_size,
1703 sr_pmap_nesting_size -= mapping_size) {
1704 target_address = sr_address + sr_offset;
1705 mapping_size = sr_pmap_nesting_size;
1706 if (mapping_size > pmap_nesting_size_max) {
1707 mapping_size = (vm_map_offset_t) pmap_nesting_size_max;
1708 }
1709 kr = vm_map_enter_mem_object(
1710 map,
1711 &target_address,
1712 mapping_size,
1713 0,
1714 VM_FLAGS_FIXED,
1715 VM_MAP_KERNEL_FLAGS_NONE,
1716 VM_MEMORY_SHARED_PMAP,
1717 sr_handle,
1718 sr_offset,
1719 TRUE,
1720 cur_prot,
1721 max_prot,
1722 VM_INHERIT_SHARE);
1723 if (kr != KERN_SUCCESS) {
1724 SHARED_REGION_TRACE_ERROR(
1725 ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
1726 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1727 (void *)VM_KERNEL_ADDRPERM(map),
1728 (void *)VM_KERNEL_ADDRPERM(task),
1729 (void *)VM_KERNEL_ADDRPERM(fsroot),
1730 cpu, cpu_subtype, is_64bit,
1731 (long long)target_address,
1732 (long long)mapping_size,
1733 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
1734 goto done;
1735 }
1736 SHARED_REGION_TRACE_DEBUG(
1737 ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
1738 "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1739 (void *)VM_KERNEL_ADDRPERM(map),
1740 (void *)VM_KERNEL_ADDRPERM(task),
1741 (void *)VM_KERNEL_ADDRPERM(fsroot),
1742 cpu, cpu_subtype, is_64bit,
1743 (long long)target_address, (long long)mapping_size,
1744 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
1745 }
1746 if (sr_size > 0) {
1747 /* and there's some left to be mapped without pmap-nesting */
1748 target_address = sr_address + sr_offset;
1749 mapping_size = sr_size;
1750 kr = vm_map_enter_mem_object(
1751 map,
1752 &target_address,
1753 mapping_size,
1754 0,
1755 VM_FLAGS_FIXED,
1756 VM_MAP_KERNEL_FLAGS_NONE,
1757 VM_KERN_MEMORY_NONE,
1758 sr_handle,
1759 sr_offset,
1760 TRUE,
1761 cur_prot,
1762 max_prot,
1763 VM_INHERIT_SHARE);
1764 if (kr != KERN_SUCCESS) {
1765 SHARED_REGION_TRACE_ERROR(
1766 ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
1767 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1768 (void *)VM_KERNEL_ADDRPERM(map),
1769 (void *)VM_KERNEL_ADDRPERM(task),
1770 (void *)VM_KERNEL_ADDRPERM(fsroot),
1771 cpu, cpu_subtype, is_64bit,
1772 (long long)target_address,
1773 (long long)mapping_size,
1774 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
1775 goto done;
1776 }
1777 SHARED_REGION_TRACE_DEBUG(
1778 ("shared_region: enter(%p,%p,%p,%d,%d,%d): "
1779 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1780 (void *)VM_KERNEL_ADDRPERM(map),
1781 (void *)VM_KERNEL_ADDRPERM(task),
1782 (void *)VM_KERNEL_ADDRPERM(fsroot),
1783 cpu, cpu_subtype, is_64bit,
1784 (long long)target_address, (long long)mapping_size,
1785 (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
1786 sr_offset += mapping_size;
1787 sr_size -= mapping_size;
1788 }
1789 assert(sr_size == 0);
1790
1791 done:
1792 if (kr == KERN_SUCCESS) {
1793 /* let the task use that shared region */
1794 vm_shared_region_set(task, shared_region);
1795 } else {
1796 /* drop our reference since we're not using it */
1797 vm_shared_region_deallocate(shared_region);
1798 vm_shared_region_set(task, NULL);
1799 }
1800
1801 SHARED_REGION_TRACE_DEBUG(
1802 ("shared_region: enter(%p,%p,%p,%d,%d,%d) <- 0x%x\n",
1803 (void *)VM_KERNEL_ADDRPERM(map),
1804 (void *)VM_KERNEL_ADDRPERM(task),
1805 (void *)VM_KERNEL_ADDRPERM(fsroot),
1806 cpu, cpu_subtype, is_64bit, kr));
1807 return kr;
1808 }
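/*
 * A minimal user-space sketch of the splitting done by vm_shared_region_enter()
 * above: an un-nested prefix, pmap-nested chunks capped at a maximum chunk size,
 * then an un-nested tail. The demo_* names and types are illustrative only, not
 * kernel interfaces; the sketch assumes base <= nest_start and that the nested
 * range fits inside [base, base + size).
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t demo_addr_t;
typedef uint64_t demo_size_t;

static void
demo_split_shared_region(demo_addr_t base, demo_size_t size,
    demo_addr_t nest_start, demo_size_t nest_size, demo_size_t nest_max)
{
	demo_size_t offset = 0;

	/* un-nested prefix, if the nested range does not start at the base */
	if (nest_start > base) {
		demo_size_t prefix = nest_start - base;
		printf("plain  [0x%llx, 0x%llx)\n", (unsigned long long)base,
		    (unsigned long long)(base + prefix));
		offset += prefix;
		size -= prefix;
	}
	/* pmap-nested chunks, each at most nest_max bytes */
	while (nest_size > 0) {
		demo_size_t chunk = (nest_size > nest_max) ? nest_max : nest_size;
		printf("nested [0x%llx, 0x%llx)\n",
		    (unsigned long long)(base + offset),
		    (unsigned long long)(base + offset + chunk));
		offset += chunk;
		size -= chunk;
		nest_size -= chunk;
	}
	/* un-nested tail */
	if (size > 0) {
		printf("plain  [0x%llx, 0x%llx)\n",
		    (unsigned long long)(base + offset),
		    (unsigned long long)(base + offset + size));
	}
}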
1809
1810 #define SANE_SLIDE_INFO_SIZE (2560*1024) /* can be changed if needed */
1811 struct vm_shared_region_slide_info slide_info;
1812
1813 kern_return_t
1814 vm_shared_region_sliding_valid(uint32_t slide)
1815 {
1816 kern_return_t kr = KERN_SUCCESS;
1817 vm_shared_region_t sr = vm_shared_region_get(current_task());
1818
1819 /* No region yet? we're fine. */
1820 if (sr == NULL) {
1821 return kr;
1822 }
1823
1824 if ((sr->sr_slid == TRUE) && slide) {
1825 if (slide != vm_shared_region_get_slide_info(sr)->slide) {
1826 printf("Only one shared region can be slid\n");
1827 kr = KERN_FAILURE;
1828 } else {
1829 /*
1830 * Request for sliding when we've
1831 * already done it with exactly the
1832 * same slide value before.
1833 * Technically this isn't wrong, but we
1834 * don't want to slide again, so we
1835 * return KERN_INVALID_ARGUMENT.
1836 */
1837 kr = KERN_INVALID_ARGUMENT;
1838 }
1839 }
1840 vm_shared_region_deallocate(sr);
1841 return kr;
1842 }
1843
1844 kern_return_t
1845 vm_shared_region_slide_mapping(
1846 vm_shared_region_t sr,
1847 mach_vm_size_t slide_info_size,
1848 mach_vm_offset_t start,
1849 mach_vm_size_t size,
1850 mach_vm_offset_t slid_mapping,
1851 uint32_t slide,
1852 memory_object_control_t sr_file_control)
1853 {
1854 kern_return_t kr;
1855 vm_object_t object;
1856 vm_shared_region_slide_info_t si;
1857 vm_offset_t slide_info_entry;
1858 vm_map_entry_t slid_entry, tmp_entry;
1859 struct vm_map_entry tmp_entry_store;
1860 memory_object_t sr_pager;
1861 vm_map_t sr_map;
1862 int vm_flags;
1863 vm_map_kernel_flags_t vmk_flags;
1864 vm_map_offset_t map_addr;
1865
1866 tmp_entry = VM_MAP_ENTRY_NULL;
1867 sr_pager = MEMORY_OBJECT_NULL;
1868 object = VM_OBJECT_NULL;
1869 slide_info_entry = 0;
1870
1871 assert(sr->sr_slide_in_progress);
1872 assert(!sr->sr_slid);
1873
1874 si = vm_shared_region_get_slide_info(sr);
1875 assert(si->slide_object == VM_OBJECT_NULL);
1876 assert(si->slide_info_entry == NULL);
1877
1878 if (sr_file_control == MEMORY_OBJECT_CONTROL_NULL) {
1879 return KERN_INVALID_ARGUMENT;
1880 }
1881 if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
1882 printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
1883 return KERN_FAILURE;
1884 }
1885
1886 kr = kmem_alloc(kernel_map,
1887 (vm_offset_t *) &slide_info_entry,
1888 (vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK);
1889 if (kr != KERN_SUCCESS) {
1890 return kr;
1891 }
1892
1893 object = memory_object_control_to_vm_object(sr_file_control);
1894 if (object == VM_OBJECT_NULL || object->internal) {
1895 object = VM_OBJECT_NULL;
1896 kr = KERN_INVALID_ADDRESS;
1897 goto done;
1898 }
1899
1900 vm_object_lock(object);
1901 vm_object_reference_locked(object); /* for si->slide_object */
1902 object->object_is_shared_cache = TRUE;
1903 vm_object_unlock(object);
1904
1905 si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry;
1906 si->slide_info_size = slide_info_size;
1907
1908 assert(slid_mapping != (mach_vm_offset_t) -1);
1909 si->slid_address = slid_mapping + sr->sr_base_address;
1910 si->slide_object = object;
1911 si->start = start;
1912 si->end = si->start + size;
1913 si->slide = slide;
1914
1915 /* find the shared region's map entry to slide */
1916 sr_map = vm_shared_region_vm_map(sr);
1917 vm_map_lock_read(sr_map);
1918 if (!vm_map_lookup_entry(sr_map,
1919 slid_mapping,
1920 &slid_entry)) {
1921 /* no mapping there */
1922 vm_map_unlock_read(sr_map);
1923 kr = KERN_INVALID_ARGUMENT;
1924 goto done;
1925 }
1926 /*
1927 * We might want to clip the entry to cover only the portion that
1928 * needs sliding (offsets si->start to si->end in the shared cache
1929 * file at the bottom of the shadow chain).
1930 * In practice, it seems to cover the entire DATA segment...
1931 */
1932 tmp_entry_store = *slid_entry;
1933 tmp_entry = &tmp_entry_store;
1934 slid_entry = VM_MAP_ENTRY_NULL;
1935 /* extra ref to keep object alive while map is unlocked */
1936 vm_object_reference(VME_OBJECT(tmp_entry));
1937 vm_map_unlock_read(sr_map);
1938
1939 /* create a "shared_region" sliding pager */
1940 sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry),
1941 VME_OFFSET(tmp_entry),
1942 si);
1943 if (sr_pager == NULL) {
1944 kr = KERN_RESOURCE_SHORTAGE;
1945 goto done;
1946 }
1947
1948 /* map that pager over the portion of the mapping that needs sliding */
1949 vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
1950 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1951 vmk_flags.vmkf_overwrite_immutable = TRUE;
1952 map_addr = tmp_entry->vme_start;
1953 kr = vm_map_enter_mem_object(sr_map,
1954 &map_addr,
1955 (tmp_entry->vme_end -
1956 tmp_entry->vme_start),
1957 (mach_vm_offset_t) 0,
1958 vm_flags,
1959 vmk_flags,
1960 VM_KERN_MEMORY_NONE,
1961 (ipc_port_t)(uintptr_t) sr_pager,
1962 0,
1963 TRUE,
1964 tmp_entry->protection,
1965 tmp_entry->max_protection,
1966 tmp_entry->inheritance);
1967 assertf(kr == KERN_SUCCESS, "kr = 0x%x\n", kr);
1968 assertf(map_addr == tmp_entry->vme_start,
1969 "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n",
1970 (uint64_t)map_addr,
1971 (uint64_t) tmp_entry->vme_start,
1972 tmp_entry);
1973
1974 /* success! */
1975 kr = KERN_SUCCESS;
1976
1977 done:
1978 if (sr_pager) {
1979 /*
1980 * Release the sr_pager reference obtained by
1981 * shared_region_pager_setup().
1982 * The mapping (if it succeeded) is now holding a reference on
1983 * the memory object.
1984 */
1985 memory_object_deallocate(sr_pager);
1986 sr_pager = MEMORY_OBJECT_NULL;
1987 }
1988 if (tmp_entry) {
1989 /* release extra ref on tmp_entry's VM object */
1990 vm_object_deallocate(VME_OBJECT(tmp_entry));
1991 tmp_entry = VM_MAP_ENTRY_NULL;
1992 }
1993
1994 if (kr != KERN_SUCCESS) {
1995 /* cleanup */
1996 if (slide_info_entry) {
1997 kmem_free(kernel_map, slide_info_entry, slide_info_size);
1998 slide_info_entry = 0;
1999 }
2000 if (si->slide_object) {
2001 vm_object_deallocate(si->slide_object);
2002 si->slide_object = VM_OBJECT_NULL;
2003 }
2004 }
2005 return kr;
2006 }
2007
2008 void*
2009 vm_shared_region_get_slide_info_entry(vm_shared_region_t sr)
2010 {
2011 return (void*)sr->sr_slide_info.slide_info_entry;
2012 }
2013
2014 static kern_return_t
2015 vm_shared_region_slide_sanity_check_v1(vm_shared_region_slide_info_entry_v1_t s_info)
2016 {
2017 uint32_t pageIndex = 0;
2018 uint16_t entryIndex = 0;
2019 uint16_t *toc = NULL;
2020
2021 toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
2022 for (; pageIndex < s_info->toc_count; pageIndex++) {
2023 entryIndex = (uint16_t)(toc[pageIndex]);
2024
2025 if (entryIndex >= s_info->entry_count) {
2026 printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
2027 return KERN_FAILURE;
2028 }
2029 }
2030 return KERN_SUCCESS;
2031 }
2032
2033 static kern_return_t
2034 vm_shared_region_slide_sanity_check_v2(vm_shared_region_slide_info_entry_v2_t s_info, mach_vm_size_t slide_info_size)
2035 {
2036 if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
2037 return KERN_FAILURE;
2038 }
2039
2040 /* Ensure that the slide info doesn't reference any data outside of its bounds. */
2041
2042 uint32_t page_starts_count = s_info->page_starts_count;
2043 uint32_t page_extras_count = s_info->page_extras_count;
2044 mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
2045 if (num_trailing_entries < page_starts_count) {
2046 return KERN_FAILURE;
2047 }
2048
2049 /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
2050 mach_vm_size_t trailing_size = num_trailing_entries << 1;
2051 if (trailing_size >> 1 != num_trailing_entries) {
2052 return KERN_FAILURE;
2053 }
2054
2055 mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
2056 if (required_size < sizeof(*s_info)) {
2057 return KERN_FAILURE;
2058 }
2059
2060 if (required_size > slide_info_size) {
2061 return KERN_FAILURE;
2062 }
2063
2064 return KERN_SUCCESS;
2065 }
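/*
 * A compact user-space sketch of the overflow-checked validation pattern used by
 * vm_shared_region_slide_sanity_check_v2() above: every intermediate size
 * computation on caller-supplied counts is checked for unsigned wrap-around
 * before the result is compared against the supplied buffer size. demo_* names
 * are illustrative only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
demo_slide_info_fits(uint32_t starts, uint32_t extras,
    size_t header_size, uint64_t buffer_size)
{
	uint64_t entries = (uint64_t)starts + (uint64_t)extras;

	/* scale by sizeof(uint16_t); shifting back detects overflow */
	uint64_t trailing = entries << 1;
	if ((trailing >> 1) != entries) {
		return false;
	}
	uint64_t required = (uint64_t)header_size + trailing;
	if (required < header_size) {
		return false;           /* the addition wrapped around */
	}
	return required <= buffer_size;
}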
2066
2067 static kern_return_t
2068 vm_shared_region_slide_sanity_check_v3(vm_shared_region_slide_info_entry_v3_t s_info, mach_vm_size_t slide_info_size)
2069 {
2070 if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
2071 printf("vm_shared_region_slide_sanity_check_v3: s_info->page_size != PAGE_SIZE_FOR_SR_SL 0x%llx != 0x%llx\n", (uint64_t)s_info->page_size, (uint64_t)PAGE_SIZE_FOR_SR_SLIDE);
2072 return KERN_FAILURE;
2073 }
2074
2075 uint32_t page_starts_count = s_info->page_starts_count;
2076 mach_vm_size_t num_trailing_entries = page_starts_count;
2077 mach_vm_size_t trailing_size = num_trailing_entries << 1;
2078 mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
2079 if (required_size < sizeof(*s_info)) {
2080 printf("vm_shared_region_slide_sanity_check_v3: required_size != sizeof(*s_info) 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)sizeof(*s_info));
2081 return KERN_FAILURE;
2082 }
2083
2084 if (required_size > slide_info_size) {
2085 printf("vm_shared_region_slide_sanity_check_v3: required_size != slide_info_size 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)slide_info_size);
2086 return KERN_FAILURE;
2087 }
2088
2089 return KERN_SUCCESS;
2090 }
2091
2092 static kern_return_t
2093 vm_shared_region_slide_sanity_check_v4(vm_shared_region_slide_info_entry_v4_t s_info, mach_vm_size_t slide_info_size)
2094 {
2095 if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
2096 return KERN_FAILURE;
2097 }
2098
2099 /* Ensure that the slide info doesn't reference any data outside of its bounds. */
2100
2101 uint32_t page_starts_count = s_info->page_starts_count;
2102 uint32_t page_extras_count = s_info->page_extras_count;
2103 mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
2104 if (num_trailing_entries < page_starts_count) {
2105 return KERN_FAILURE;
2106 }
2107
2108 /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
2109 mach_vm_size_t trailing_size = num_trailing_entries << 1;
2110 if (trailing_size >> 1 != num_trailing_entries) {
2111 return KERN_FAILURE;
2112 }
2113
2114 mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
2115 if (required_size < sizeof(*s_info)) {
2116 return KERN_FAILURE;
2117 }
2118
2119 if (required_size > slide_info_size) {
2120 return KERN_FAILURE;
2121 }
2122
2123 return KERN_SUCCESS;
2124 }
2125
2126
2127 kern_return_t
2128 vm_shared_region_slide_sanity_check(vm_shared_region_t sr)
2129 {
2130 vm_shared_region_slide_info_t si;
2131 vm_shared_region_slide_info_entry_t s_info;
2132 kern_return_t kr;
2133
2134 si = vm_shared_region_get_slide_info(sr);
2135 s_info = si->slide_info_entry;
2136
2137 kr = mach_vm_protect(kernel_map,
2138 (mach_vm_offset_t)(vm_offset_t)s_info,
2139 (mach_vm_size_t) si->slide_info_size,
2140 TRUE, VM_PROT_READ);
2141 if (kr != KERN_SUCCESS) {
2142 panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr);
2143 }
2144
2145 if (s_info->version == 1) {
2146 kr = vm_shared_region_slide_sanity_check_v1(&s_info->v1);
2147 } else if (s_info->version == 2) {
2148 kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, si->slide_info_size);
2149 } else if (s_info->version == 3) {
2150 kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, si->slide_info_size);
2151 } else if (s_info->version == 4) {
2152 kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, si->slide_info_size);
2153 } else {
2154 goto fail;
2155 }
2156 if (kr != KERN_SUCCESS) {
2157 goto fail;
2158 }
2159
2160 return KERN_SUCCESS;
2161 fail:
2162 if (si->slide_info_entry != NULL) {
2163 kmem_free(kernel_map,
2164 (vm_offset_t) si->slide_info_entry,
2165 (vm_size_t) si->slide_info_size);
2166
2167 vm_object_deallocate(si->slide_object);
2168 si->slide_object = NULL;
2169 si->start = 0;
2170 si->end = 0;
2171 si->slide = 0;
2172 si->slide_info_entry = NULL;
2173 si->slide_info_size = 0;
2174 }
2175 return KERN_FAILURE;
2176 }
2177
2178 static kern_return_t
2179 vm_shared_region_slide_page_v1(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
2180 {
2181 uint16_t *toc = NULL;
2182 slide_info_entry_toc_t bitmap = NULL;
2183 uint32_t i = 0, j = 0;
2184 uint8_t b = 0;
2185 uint32_t slide = si->slide;
2186 int is_64 = task_has_64Bit_addr(current_task());
2187
2188 vm_shared_region_slide_info_entry_v1_t s_info = &si->slide_info_entry->v1;
2189 toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
2190
2191 if (pageIndex >= s_info->toc_count) {
2192 printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count);
2193 } else {
2194 uint16_t entryIndex = (uint16_t)(toc[pageIndex]);
2195 slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset);
2196
2197 if (entryIndex >= s_info->entry_count) {
2198 printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count);
2199 } else {
2200 bitmap = &slide_info_entries[entryIndex];
2201
2202 for (i = 0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) {
2203 b = bitmap->entry[i];
2204 if (b != 0) {
2205 for (j = 0; j < 8; ++j) {
2206 if (b & (1 << j)) {
2207 uint32_t *ptr_to_slide;
2208 uint32_t old_value;
2209
2210 ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr) + (sizeof(uint32_t) * (i * 8 + j)));
2211 old_value = *ptr_to_slide;
2212 *ptr_to_slide += slide;
2213 if (is_64 && *ptr_to_slide < old_value) {
2214 /*
2215 * We just slid the low 32 bits of a 64-bit pointer
2216 * and it looks like there should have been a carry-over
2217 * to the upper 32 bits.
2218 * The sliding failed...
2219 */
2220 printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n",
2221 i, j, b, slide, old_value, *ptr_to_slide);
2222 return KERN_FAILURE;
2223 }
2224 }
2225 }
2226 }
2227 }
2228 }
2229 }
2230
2231 return KERN_SUCCESS;
2232 }
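/*
 * A user-space sketch of the v1 sliding done above: each byte of the per-page
 * bitmap flags eight 32-bit words; every flagged word gets the slide added, and
 * for 64-bit tasks a wrap of the low 32 bits means a carry into the upper half
 * was lost, which is the failure case. demo_* names and sizes are illustrative;
 * the page is assumed to be 4 KB and 4-byte aligned.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE    4096
#define DEMO_BITMAP_BYTES (DEMO_PAGE_SIZE / sizeof(uint32_t) / 8)

static bool
demo_slide_page_v1(uint8_t *page, const uint8_t bitmap[DEMO_BITMAP_BYTES],
    uint32_t slide, bool is_64)
{
	for (uint32_t i = 0; i < DEMO_BITMAP_BYTES; i++) {
		uint8_t b = bitmap[i];
		if (b == 0) {
			continue;
		}
		for (uint32_t j = 0; j < 8; j++) {
			if (!(b & (1u << j))) {
				continue;
			}
			uint32_t *word = (uint32_t *)(void *)
			    (page + sizeof(uint32_t) * (i * 8 + j));
			uint32_t old_value = *word;
			*word = old_value + slide;
			if (is_64 && *word < old_value) {
				return false;   /* carry into the upper 32 bits was lost */
			}
		}
	}
	return true;
}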
2233
2234 static kern_return_t
2235 rebase_chain_32(
2236 uint8_t *page_content,
2237 uint16_t start_offset,
2238 uint32_t slide_amount,
2239 vm_shared_region_slide_info_entry_v2_t s_info)
2240 {
2241 const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);
2242
2243 const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
2244 const uint32_t value_mask = ~delta_mask;
2245 const uint32_t value_add = (uint32_t)(s_info->value_add);
2246 const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;
2247
2248 uint32_t page_offset = start_offset;
2249 uint32_t delta = 1;
2250
2251 while (delta != 0 && page_offset <= last_page_offset) {
2252 uint8_t *loc;
2253 uint32_t value;
2254
2255 loc = page_content + page_offset;
2256 memcpy(&value, loc, sizeof(value));
2257 delta = (value & delta_mask) >> delta_shift;
2258 value &= value_mask;
2259
2260 if (value != 0) {
2261 value += value_add;
2262 value += slide_amount;
2263 }
2264 memcpy(loc, &value, sizeof(value));
2265 page_offset += delta;
2266 }
2267
2268 /* If the offset went past the end of the page, then the slide data is invalid. */
2269 if (page_offset > last_page_offset) {
2270 return KERN_FAILURE;
2271 }
2272 return KERN_SUCCESS;
2273 }
2274
2275 static kern_return_t
2276 rebase_chain_64(
2277 uint8_t *page_content,
2278 uint16_t start_offset,
2279 uint32_t slide_amount,
2280 vm_shared_region_slide_info_entry_v2_t s_info)
2281 {
2282 const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint64_t);
2283
2284 const uint64_t delta_mask = s_info->delta_mask;
2285 const uint64_t value_mask = ~delta_mask;
2286 const uint64_t value_add = s_info->value_add;
2287 const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;
2288
2289 uint32_t page_offset = start_offset;
2290 uint32_t delta = 1;
2291
2292 while (delta != 0 && page_offset <= last_page_offset) {
2293 uint8_t *loc;
2294 uint64_t value;
2295
2296 loc = page_content + page_offset;
2297 memcpy(&value, loc, sizeof(value));
2298 delta = (uint32_t)((value & delta_mask) >> delta_shift);
2299 value &= value_mask;
2300
2301 if (value != 0) {
2302 value += value_add;
2303 value += slide_amount;
2304 }
2305 memcpy(loc, &value, sizeof(value));
2306 page_offset += delta;
2307 }
2308
2309 if (page_offset + sizeof(uint32_t) == PAGE_SIZE_FOR_SR_SLIDE) {
2310 /* If a pointer straddling the page boundary needs to be adjusted, then
2311 * add the slide to the lower half. The encoding guarantees that the upper
2312 * half on the next page will need no masking.
2313 *
2314 * This assumes a little-endian machine and that the region being slid
2315 * never crosses a 4 GB boundary. */
2316
2317 uint8_t *loc = page_content + page_offset;
2318 uint32_t value;
2319
2320 memcpy(&value, loc, sizeof(value));
2321 value += slide_amount;
2322 memcpy(loc, &value, sizeof(value));
2323 } else if (page_offset > last_page_offset) {
2324 return KERN_FAILURE;
2325 }
2326
2327 return KERN_SUCCESS;
2328 }
2329
2330 static kern_return_t
2331 rebase_chain(
2332 boolean_t is_64,
2333 uint32_t pageIndex,
2334 uint8_t *page_content,
2335 uint16_t start_offset,
2336 uint32_t slide_amount,
2337 vm_shared_region_slide_info_entry_v2_t s_info)
2338 {
2339 kern_return_t kr;
2340 if (is_64) {
2341 kr = rebase_chain_64(page_content, start_offset, slide_amount, s_info);
2342 } else {
2343 kr = rebase_chain_32(page_content, start_offset, slide_amount, s_info);
2344 }
2345
2346 if (kr != KERN_SUCCESS) {
2347 printf("vm_shared_region_slide_page() offset overflow: pageIndex=%u, start_offset=%u, slide_amount=%u\n",
2348 pageIndex, start_offset, slide_amount);
2349 }
2350 return kr;
2351 }
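/*
 * A user-space sketch of the delta-linked chain walk performed by
 * rebase_chain_32()/rebase_chain_64() above (32-bit flavor shown): each slot
 * packs the distance to the next slot in delta_mask and the pointer payload in
 * the remaining bits; a zero delta ends the chain, and running past the page end
 * marks the slide data as invalid. demo_* names are illustrative; delta_mask is
 * assumed nonzero with its lowest set bit at position >= 2, as in the v2 format.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096

static bool
demo_rebase_chain32(uint8_t *page, uint16_t start_offset, uint32_t slide,
    uint32_t delta_mask, uint32_t value_add)
{
	const uint32_t last = DEMO_PAGE_SIZE - sizeof(uint32_t);
	const uint32_t value_mask = ~delta_mask;
	/* deltas are stored in 4-byte units, hence the "- 2" */
	const uint32_t delta_shift = (uint32_t)__builtin_ctz(delta_mask) - 2;

	uint32_t offset = start_offset;
	uint32_t delta = 1;

	while (delta != 0 && offset <= last) {
		uint32_t value;
		memcpy(&value, page + offset, sizeof(value));
		delta = (value & delta_mask) >> delta_shift;
		value &= value_mask;
		if (value != 0) {
			value += value_add + slide;
		}
		memcpy(page + offset, &value, sizeof(value));
		offset += delta;
	}
	/* a nonzero delta that walked off the page means the chain is bad */
	return offset <= last;
}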
2352
2353 static kern_return_t
2354 vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
2355 {
2356 vm_shared_region_slide_info_entry_v2_t s_info = &si->slide_info_entry->v2;
2357 const uint32_t slide_amount = si->slide;
2358
2359 /* The high bits of the delta_mask field are nonzero precisely when the shared
2360 * cache is 64-bit. */
2361 const boolean_t is_64 = (s_info->delta_mask >> 32) != 0;
2362
2363 const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
2364 const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);
2365
2366 uint8_t *page_content = (uint8_t *)vaddr;
2367 uint16_t page_entry;
2368
2369 if (pageIndex >= s_info->page_starts_count) {
2370 printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
2371 pageIndex, s_info->page_starts_count);
2372 return KERN_FAILURE;
2373 }
2374 page_entry = page_starts[pageIndex];
2375
2376 if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
2377 return KERN_SUCCESS;
2378 }
2379
2380 if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) {
2381 uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE;
2382 uint16_t info;
2383
2384 do {
2385 uint16_t page_start_offset;
2386 kern_return_t kr;
2387
2388 if (chain_index >= s_info->page_extras_count) {
2389 printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
2390 chain_index, s_info->page_extras_count);
2391 return KERN_FAILURE;
2392 }
2393 info = page_extras[chain_index];
2394 page_start_offset = (info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
2395
2396 kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
2397 if (kr != KERN_SUCCESS) {
2398 return KERN_FAILURE;
2399 }
2400
2401 chain_index++;
2402 } while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END));
2403 } else {
2404 const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
2405 kern_return_t kr;
2406
2407 kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
2408 if (kr != KERN_SUCCESS) {
2409 return KERN_FAILURE;
2410 }
2411 }
2412
2413 return KERN_SUCCESS;
2414 }
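/*
 * A user-space sketch of the page_starts/page_extras decoding done by
 * vm_shared_region_slide_page_v2() above: a sentinel means "nothing to slide",
 * an "extra" flag means the value indexes a run of extras entries (terminated by
 * an end bit), and otherwise the value is the first chain offset in 4-byte units.
 * The DEMO_* constants are placeholders for this sketch; the real values come
 * from the dyld shared cache slide-info format.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_NO_REBASE 0x4000  /* page has nothing to slide */
#define DEMO_PAGE_USE_EXTRA 0x8000  /* value indexes the extras table */
#define DEMO_EXTRA_END      0x8000  /* last extras entry for this page */
#define DEMO_VALUE_MASK     0x3FFF
#define DEMO_OFFSET_SHIFT   2       /* offsets are stored in 4-byte units */

static bool
demo_decode_page_entry(uint16_t page_entry, const uint16_t *page_extras,
    uint32_t page_extras_count)
{
	if (page_entry == DEMO_PAGE_NO_REBASE) {
		return true;                    /* nothing to do for this page */
	}
	if (page_entry & DEMO_PAGE_USE_EXTRA) {
		uint16_t idx = page_entry & DEMO_VALUE_MASK;
		uint16_t info;
		do {
			if (idx >= page_extras_count) {
				return false;   /* malformed slide info */
			}
			info = page_extras[idx++];
			printf("chain starts at offset %u\n",
			    (unsigned)((info & DEMO_VALUE_MASK) << DEMO_OFFSET_SHIFT));
		} while (!(info & DEMO_EXTRA_END));
		return true;
	}
	printf("chain starts at offset %u\n",
	    (unsigned)((uint32_t)page_entry << DEMO_OFFSET_SHIFT));
	return true;
}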
2415
2416
2417 static kern_return_t
2418 vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vaddr, __unused mach_vm_offset_t uservaddr, uint32_t pageIndex)
2419 {
2420 vm_shared_region_slide_info_entry_v3_t s_info = &si->slide_info_entry->v3;
2421 const uint32_t slide_amount = si->slide;
2422
2423 uint8_t *page_content = (uint8_t *)vaddr;
2424 uint16_t page_entry;
2425
2426 if (pageIndex >= s_info->page_starts_count) {
2427 printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
2428 pageIndex, s_info->page_starts_count);
2429 return KERN_FAILURE;
2430 }
2431 page_entry = s_info->page_starts[pageIndex];
2432
2433 if (page_entry == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE) {
2434 return KERN_SUCCESS;
2435 }
2436
2437 uint8_t* rebaseLocation = page_content;
2438 uint64_t delta = page_entry;
2439 do {
2440 rebaseLocation += delta;
2441 uint64_t value;
2442 memcpy(&value, rebaseLocation, sizeof(value));
2443 delta = ((value & 0x3FF8000000000000) >> 51) * sizeof(uint64_t);
2444
2445 // A pointer is one of :
2446 // {
2447 // uint64_t pointerValue : 51;
2448 // uint64_t offsetToNextPointer : 11;
2449 // uint64_t isBind : 1 = 0;
2450 // uint64_t authenticated : 1 = 0;
2451 // }
2452 // {
2453 // uint32_t offsetFromSharedCacheBase;
2454 // uint16_t diversityData;
2455 // uint16_t hasAddressDiversity : 1;
2456 // uint16_t hasDKey : 1;
2457 // uint16_t hasBKey : 1;
2458 // uint16_t offsetToNextPointer : 11;
2459 // uint16_t isBind : 1;
2460 // uint16_t authenticated : 1 = 1;
2461 // }
2462
2463 bool isBind = (value & (1ULL << 62)) != 0;
2464 if (isBind) {
2465 return KERN_FAILURE;
2466 }
2467
2468 bool isAuthenticated = (value & (1ULL << 63)) != 0;
2469
2470 if (isAuthenticated) {
2471 // The new value for a rebase is the low 32-bits of the threaded value plus the slide.
2472 value = (value & 0xFFFFFFFF) + slide_amount;
2473 // Add in the offset from the mach_header
2474 const uint64_t value_add = s_info->value_add;
2475 value += value_add;
2476
2477 } else {
2478 // The new value for a rebase is the low 51-bits of the threaded value plus the slide.
2479 // Regular pointer which needs to fit in 51-bits of value.
2480 // C++ RTTI uses the top bit, so we'll allow the whole top-byte
2481 // and the bottom 43 bits to fit into the 51 bits.
2482 uint64_t top8Bits = value & 0x0007F80000000000ULL;
2483 uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;
2484 uint64_t targetValue = (top8Bits << 13) | bottom43Bits;
2485 value = targetValue + slide_amount;
2486 }
2487
2488 memcpy(rebaseLocation, &value, sizeof(value));
2489 } while (delta != 0);
2490
2491 return KERN_SUCCESS;
2492 }
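/*
 * A user-space sketch of the per-pointer arithmetic in
 * vm_shared_region_slide_page_v3() above, using the bit layout described in the
 * comment there: bit 63 selects the authenticated form, bit 62 marks a bind
 * (not expected in rebase-only slide info). demo_* names are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
demo_slide_v3_value(uint64_t value, uint32_t slide, uint64_t value_add,
    uint64_t *out)
{
	if (value & (1ULL << 62)) {
		return false;                   /* a bind cannot be rebased here */
	}
	if (value & (1ULL << 63)) {
		/* authenticated: low 32 bits are an offset from the cache base */
		*out = (value & 0xFFFFFFFFULL) + slide + value_add;
	} else {
		/* plain: the top 8 and bottom 43 bits are packed into 51 bits */
		uint64_t top8 = value & 0x0007F80000000000ULL;
		uint64_t bottom43 = value & 0x000007FFFFFFFFFFULL;
		*out = ((top8 << 13) | bottom43) + slide;
	}
	return true;
}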
2493
2494 static kern_return_t
2495 rebase_chainv4(
2496 uint8_t *page_content,
2497 uint16_t start_offset,
2498 uint32_t slide_amount,
2499 vm_shared_region_slide_info_entry_v4_t s_info)
2500 {
2501 const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);
2502
2503 const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
2504 const uint32_t value_mask = ~delta_mask;
2505 const uint32_t value_add = (uint32_t)(s_info->value_add);
2506 const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;
2507
2508 uint32_t page_offset = start_offset;
2509 uint32_t delta = 1;
2510
2511 while (delta != 0 && page_offset <= last_page_offset) {
2512 uint8_t *loc;
2513 uint32_t value;
2514
2515 loc = page_content + page_offset;
2516 memcpy(&value, loc, sizeof(value));
2517 delta = (value & delta_mask) >> delta_shift;
2518 value &= value_mask;
2519
2520 if ((value & 0xFFFF8000) == 0) {
2521 // small positive non-pointer, use as-is
2522 } else if ((value & 0x3FFF8000) == 0x3FFF8000) {
2523 // small negative non-pointer
2524 value |= 0xC0000000;
2525 } else {
2526 // pointer that needs rebasing
2527 value += value_add;
2528 value += slide_amount;
2529 }
2530 memcpy(loc, &value, sizeof(value));
2531 page_offset += delta;
2532 }
2533
2534 /* If the offset went past the end of the page, then the slide data is invalid. */
2535 if (page_offset > last_page_offset) {
2536 return KERN_FAILURE;
2537 }
2538 return KERN_SUCCESS;
2539 }
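/*
 * A user-space sketch of the three value classes handled by rebase_chainv4()
 * above: small positive non-pointers pass through, small negative non-pointers
 * get their high sign bits restored, and everything else is a pointer that gets
 * rebased. The masks are the ones used in the function; demo_* names are
 * illustrative only.
 */
#include <stdint.h>

static uint32_t
demo_rebase_v4_value(uint32_t value, uint32_t value_add, uint32_t slide)
{
	if ((value & 0xFFFF8000) == 0) {
		return value;                   /* small positive non-pointer */
	}
	if ((value & 0x3FFF8000) == 0x3FFF8000) {
		return value | 0xC0000000;      /* small negative non-pointer */
	}
	return value + value_add + slide;       /* pointer that needs rebasing */
}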
2540
2541 static kern_return_t
2542 vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
2543 {
2544 vm_shared_region_slide_info_entry_v4_t s_info = &si->slide_info_entry->v4;
2545 const uint32_t slide_amount = si->slide;
2546
2547 const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
2548 const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);
2549
2550 uint8_t *page_content = (uint8_t *)vaddr;
2551 uint16_t page_entry;
2552
2553 if (pageIndex >= s_info->page_starts_count) {
2554 printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
2555 pageIndex, s_info->page_starts_count);
2556 return KERN_FAILURE;
2557 }
2558 page_entry = page_starts[pageIndex];
2559
2560 if (page_entry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE) {
2561 return KERN_SUCCESS;
2562 }
2563
2564 if (page_entry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) {
2565 uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE4_PAGE_INDEX;
2566 uint16_t info;
2567
2568 do {
2569 uint16_t page_start_offset;
2570 kern_return_t kr;
2571
2572 if (chain_index >= s_info->page_extras_count) {
2573 printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
2574 chain_index, s_info->page_extras_count);
2575 return KERN_FAILURE;
2576 }
2577 info = page_extras[chain_index];
2578 page_start_offset = (info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
2579
2580 kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
2581 if (kr != KERN_SUCCESS) {
2582 return KERN_FAILURE;
2583 }
2584
2585 chain_index++;
2586 } while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END));
2587 } else {
2588 const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
2589 kern_return_t kr;
2590
2591 kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
2592 if (kr != KERN_SUCCESS) {
2593 return KERN_FAILURE;
2594 }
2595 }
2596
2597 return KERN_SUCCESS;
2598 }
2599
2600
2601
2602 kern_return_t
2603 vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, mach_vm_offset_t uservaddr, uint32_t pageIndex)
2604 {
2605 if (si->slide_info_entry->version == 1) {
2606 return vm_shared_region_slide_page_v1(si, vaddr, pageIndex);
2607 } else if (si->slide_info_entry->version == 2) {
2608 return vm_shared_region_slide_page_v2(si, vaddr, pageIndex);
2609 } else if (si->slide_info_entry->version == 3) {
2610 return vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex);
2611 } else if (si->slide_info_entry->version == 4) {
2612 return vm_shared_region_slide_page_v4(si, vaddr, pageIndex);
2613 } else {
2614 return KERN_FAILURE;
2615 }
2616 }
2617
2618 /******************************************************************************/
2619 /* Comm page support */
2620 /******************************************************************************/
2621
2622 ipc_port_t commpage32_handle = IPC_PORT_NULL;
2623 ipc_port_t commpage64_handle = IPC_PORT_NULL;
2624 vm_named_entry_t commpage32_entry = NULL;
2625 vm_named_entry_t commpage64_entry = NULL;
2626 vm_map_t commpage32_map = VM_MAP_NULL;
2627 vm_map_t commpage64_map = VM_MAP_NULL;
2628
2629 ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
2630 ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
2631 vm_named_entry_t commpage_text32_entry = NULL;
2632 vm_named_entry_t commpage_text64_entry = NULL;
2633 vm_map_t commpage_text32_map = VM_MAP_NULL;
2634 vm_map_t commpage_text64_map = VM_MAP_NULL;
2635
2636 user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START;
2637 user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START;
2638
2639 #if defined(__i386__) || defined(__x86_64__)
2640 /*
2641 * Create a memory entry, VM submap and pmap for one commpage.
2642 */
2643 static void
2644 _vm_commpage_init(
2645 ipc_port_t *handlep,
2646 vm_map_size_t size)
2647 {
2648 kern_return_t kr;
2649 vm_named_entry_t mem_entry;
2650 vm_map_t new_map;
2651
2652 SHARED_REGION_TRACE_DEBUG(
2653 ("commpage: -> _init(0x%llx)\n",
2654 (long long)size));
2655
2656 kr = mach_memory_entry_allocate(&mem_entry,
2657 handlep);
2658 if (kr != KERN_SUCCESS) {
2659 panic("_vm_commpage_init: could not allocate mem_entry");
2660 }
2661 new_map = vm_map_create(pmap_create(NULL, 0, 0), 0, size, TRUE);
2662 if (new_map == VM_MAP_NULL) {
2663 panic("_vm_commpage_init: could not allocate VM map");
2664 }
2665 mem_entry->backing.map = new_map;
2666 mem_entry->internal = TRUE;
2667 mem_entry->is_sub_map = TRUE;
2668 mem_entry->offset = 0;
2669 mem_entry->protection = VM_PROT_ALL;
2670 mem_entry->size = size;
2671
2672 SHARED_REGION_TRACE_DEBUG(
2673 ("commpage: _init(0x%llx) <- %p\n",
2674 (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep)));
2675 }
2676 #endif
2677
2678
2679 /*
2680 * Initialize the comm text pages at boot time
2681 */
2682 extern u_int32_t random(void);
2683 void
2684 vm_commpage_text_init(void)
2685 {
2686 SHARED_REGION_TRACE_DEBUG(
2687 ("commpage text: ->init()\n"));
2688 #if defined(__i386__) || defined(__x86_64__)
2689 /* create the 32 bit comm text page */
2690 unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restricting to 32bMAX-2PAGE */
2691 _vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
2692 commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject;
2693 commpage_text32_map = commpage_text32_entry->backing.map;
2694 commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
2695 /* XXX if (cpu_is_64bit_capable()) ? */
2696 /* create the 64-bit comm page */
2697 offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding to up to a 2MB range */
2698 _vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
2699 commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject;
2700 commpage_text64_map = commpage_text64_entry->backing.map;
2701 commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);
2702
2703 commpage_text_populate();
2704 #elif defined(__arm64__) || defined(__arm__)
2705 #else
2706 #error Unknown architecture.
2707 #endif /* __i386__ || __x86_64__ */
2708 /* populate the routines in here */
2709 SHARED_REGION_TRACE_DEBUG(
2710 ("commpage text: init() <-\n"));
2711 }
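/*
 * A user-space sketch of the page-aligned randomization used by
 * vm_commpage_text_init() above: pick a random page index below the slide range
 * and shift it into a byte offset. rand() stands in for the kernel's random(),
 * DEMO_PAGE_SHIFT assumes 4 KB pages, and slide_range_pages must be nonzero.
 */
#include <stdint.h>
#include <stdlib.h>

#define DEMO_PAGE_SHIFT 12

static uint64_t
demo_commpage_text_offset(uint32_t slide_range_pages)
{
	return ((uint64_t)((uint32_t)rand() % slide_range_pages)) << DEMO_PAGE_SHIFT;
}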
2712
2713 /*
2714 * Initialize the comm pages at boot time.
2715 */
2716 void
2717 vm_commpage_init(void)
2718 {
2719 SHARED_REGION_TRACE_DEBUG(
2720 ("commpage: -> init()\n"));
2721
2722 #if defined(__i386__) || defined(__x86_64__)
2723 /* create the 32-bit comm page */
2724 _vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
2725 commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
2726 commpage32_map = commpage32_entry->backing.map;
2727
2728 /* XXX if (cpu_is_64bit_capable()) ? */
2729 /* create the 64-bit comm page */
2730 _vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
2731 commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
2732 commpage64_map = commpage64_entry->backing.map;
2733
2734 #endif /* __i386__ || __x86_64__ */
2735
2736 /* populate them according to this specific platform */
2737 commpage_populate();
2738 __commpage_setup = 1;
2739 #if defined(__i386__) || defined(__x86_64__)
2740 if (__system_power_source == 0) {
2741 post_sys_powersource_internal(0, 1);
2742 }
2743 #endif /* __i386__ || __x86_64__ */
2744
2745 SHARED_REGION_TRACE_DEBUG(
2746 ("commpage: init() <-\n"));
2747 }
2748
2749 /*
2750 * Enter the appropriate comm page into the task's address space.
2751 * This is called at exec() time via vm_map_exec().
2752 */
2753 kern_return_t
2754 vm_commpage_enter(
2755 vm_map_t map,
2756 task_t task,
2757 boolean_t is64bit)
2758 {
2759 #if defined(__arm__)
2760 #pragma unused(is64bit)
2761 (void)task;
2762 (void)map;
2763 return KERN_SUCCESS;
2764 #elif defined(__arm64__)
2765 #pragma unused(is64bit)
2766 (void)task;
2767 (void)map;
2768 pmap_insert_sharedpage(vm_map_pmap(map));
2769 return KERN_SUCCESS;
2770 #else
2771 ipc_port_t commpage_handle, commpage_text_handle;
2772 vm_map_offset_t commpage_address, objc_address, commpage_text_address;
2773 vm_map_size_t commpage_size, objc_size, commpage_text_size;
2774 int vm_flags;
2775 vm_map_kernel_flags_t vmk_flags;
2776 kern_return_t kr;
2777
2778 SHARED_REGION_TRACE_DEBUG(
2779 ("commpage: -> enter(%p,%p)\n",
2780 (void *)VM_KERNEL_ADDRPERM(map),
2781 (void *)VM_KERNEL_ADDRPERM(task)));
2782
2783 commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
2784 /* the comm page is likely to be beyond the actual end of the VM map */
2785 vm_flags = VM_FLAGS_FIXED;
2786 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
2787 vmk_flags.vmkf_beyond_max = TRUE;
2788
2789 /* select the appropriate comm page for this task */
2790 assert(!(is64bit ^ vm_map_is_64bit(map)));
2791 if (is64bit) {
2792 commpage_handle = commpage64_handle;
2793 commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
2794 commpage_size = _COMM_PAGE64_AREA_LENGTH;
2795 objc_size = _COMM_PAGE64_OBJC_SIZE;
2796 objc_address = _COMM_PAGE64_OBJC_BASE;
2797 commpage_text_handle = commpage_text64_handle;
2798 commpage_text_address = (vm_map_offset_t) commpage_text64_location;
2799 } else {
2800 commpage_handle = commpage32_handle;
2801 commpage_address =
2802 (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
2803 commpage_size = _COMM_PAGE32_AREA_LENGTH;
2804 objc_size = _COMM_PAGE32_OBJC_SIZE;
2805 objc_address = _COMM_PAGE32_OBJC_BASE;
2806 commpage_text_handle = commpage_text32_handle;
2807 commpage_text_address = (vm_map_offset_t) commpage_text32_location;
2808 }
2809
2810 vm_tag_t tag = VM_KERN_MEMORY_NONE;
2811 if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
2812 (commpage_size & (pmap_nesting_size_min - 1)) == 0) {
2813 /* the commpage is properly aligned or sized for pmap-nesting */
2814 tag = VM_MEMORY_SHARED_PMAP;
2815 }
2816 /* map the comm page in the task's address space */
2817 assert(commpage_handle != IPC_PORT_NULL);
2818 kr = vm_map_enter_mem_object(
2819 map,
2820 &commpage_address,
2821 commpage_size,
2822 0,
2823 vm_flags,
2824 vmk_flags,
2825 tag,
2826 commpage_handle,
2827 0,
2828 FALSE,
2829 VM_PROT_READ,
2830 VM_PROT_READ,
2831 VM_INHERIT_SHARE);
2832 if (kr != KERN_SUCCESS) {
2833 SHARED_REGION_TRACE_ERROR(
2834 ("commpage: enter(%p,0x%llx,0x%llx) "
2835 "commpage %p mapping failed 0x%x\n",
2836 (void *)VM_KERNEL_ADDRPERM(map),
2837 (long long)commpage_address,
2838 (long long)commpage_size,
2839 (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr));
2840 }
2841
2842 /* map the comm text page in the task's address space */
2843 assert(commpage_text_handle != IPC_PORT_NULL);
2844 kr = vm_map_enter_mem_object(
2845 map,
2846 &commpage_text_address,
2847 commpage_text_size,
2848 0,
2849 vm_flags,
2850 vmk_flags,
2851 tag,
2852 commpage_text_handle,
2853 0,
2854 FALSE,
2855 VM_PROT_READ | VM_PROT_EXECUTE,
2856 VM_PROT_READ | VM_PROT_EXECUTE,
2857 VM_INHERIT_SHARE);
2858 if (kr != KERN_SUCCESS) {
2859 SHARED_REGION_TRACE_ERROR(
2860 ("commpage text: enter(%p,0x%llx,0x%llx) "
2861 "commpage text %p mapping failed 0x%x\n",
2862 (void *)VM_KERNEL_ADDRPERM(map),
2863 (long long)commpage_text_address,
2864 (long long)commpage_text_size,
2865 (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr));
2866 }
2867
2868 /*
2869 * Since we're here, we also pre-allocate some virtual space for the
2870 * Objective-C run-time, if needed...
2871 */
2872 if (objc_size != 0) {
2873 kr = vm_map_enter_mem_object(
2874 map,
2875 &objc_address,
2876 objc_size,
2877 0,
2878 VM_FLAGS_FIXED,
2879 vmk_flags,
2880 tag,
2881 IPC_PORT_NULL,
2882 0,
2883 FALSE,
2884 VM_PROT_ALL,
2885 VM_PROT_ALL,
2886 VM_INHERIT_DEFAULT);
2887 if (kr != KERN_SUCCESS) {
2888 SHARED_REGION_TRACE_ERROR(
2889 ("commpage: enter(%p,0x%llx,0x%llx) "
2890 "objc mapping failed 0x%x\n",
2891 (void *)VM_KERNEL_ADDRPERM(map),
2892 (long long)objc_address,
2893 (long long)objc_size, kr));
2894 }
2895 }
2896
2897 SHARED_REGION_TRACE_DEBUG(
2898 ("commpage: enter(%p,%p) <- 0x%x\n",
2899 (void *)VM_KERNEL_ADDRPERM(map),
2900 (void *)VM_KERNEL_ADDRPERM(task), kr));
2901 return kr;
2902 #endif
2903 }
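/*
 * A small sketch of the alignment test vm_commpage_enter() uses above before
 * tagging a mapping for pmap nesting: both the address and the size must be
 * multiples of the minimum nesting granule (assumed to be a power of two).
 * demo_* names are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
demo_pmap_nesting_eligible(uint64_t addr, uint64_t size, uint64_t nesting_min)
{
	return ((addr & (nesting_min - 1)) == 0) &&
	       ((size & (nesting_min - 1)) == 0);
}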
2904
2905 int
2906 vm_shared_region_slide(uint32_t slide,
2907 mach_vm_offset_t entry_start_address,
2908 mach_vm_size_t entry_size,
2909 mach_vm_offset_t slide_start,
2910 mach_vm_size_t slide_size,
2911 mach_vm_offset_t slid_mapping,
2912 memory_object_control_t sr_file_control)
2913 {
2914 void *slide_info_entry = NULL;
2915 int error;
2916 vm_shared_region_t sr;
2917
2918 SHARED_REGION_TRACE_DEBUG(
2919 ("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
2920 slide, entry_start_address, entry_size, slide_start, slide_size));
2921
2922 sr = vm_shared_region_get(current_task());
2923 if (sr == NULL) {
2924 printf("%s: no shared region?\n", __FUNCTION__);
2925 SHARED_REGION_TRACE_DEBUG(
2926 ("vm_shared_region_slide: <- %d (no shared region)\n",
2927 KERN_FAILURE));
2928 return KERN_FAILURE;
2929 }
2930
2931 /*
2932 * Protect from concurrent access.
2933 */
2934 vm_shared_region_lock();
2935 while (sr->sr_slide_in_progress) {
2936 vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
2937 }
2938 if (sr->sr_slid
2939 #ifndef CONFIG_EMBEDDED
2940 || shared_region_completed_slide
2941 #endif
2942 ) {
2943 vm_shared_region_unlock();
2944
2945 vm_shared_region_deallocate(sr);
2946 printf("%s: shared region already slid?\n", __FUNCTION__);
2947 SHARED_REGION_TRACE_DEBUG(
2948 ("vm_shared_region_slide: <- %d (already slid)\n",
2949 KERN_FAILURE));
2950 return KERN_FAILURE;
2951 }
2952
2953 sr->sr_slide_in_progress = TRUE;
2954 vm_shared_region_unlock();
2955
2956 error = vm_shared_region_slide_mapping(sr,
2957 slide_size,
2958 entry_start_address,
2959 entry_size,
2960 slid_mapping,
2961 slide,
2962 sr_file_control);
2963 if (error) {
2964 printf("slide_info initialization failed with kr=%d\n", error);
2965 goto done;
2966 }
2967
2968 slide_info_entry = vm_shared_region_get_slide_info_entry(sr);
2969 if (slide_info_entry == NULL) {
2970 error = KERN_FAILURE;
2971 } else {
2972 error = copyin((user_addr_t)slide_start,
2973 slide_info_entry,
2974 (vm_size_t)slide_size);
2975 if (error) {
2976 error = KERN_INVALID_ADDRESS;
2977 }
2978 }
2979 if (error) {
2980 goto done;
2981 }
2982
2983 if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) {
2984 error = KERN_INVALID_ARGUMENT;
2985 printf("Sanity Check failed for slide_info\n");
2986 } else {
2987 #if DEBUG
2988 printf("Succesfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n",
2989 (void*)(uintptr_t)entry_start_address,
2990 (unsigned long)entry_size,
2991 (unsigned long)slide_size);
2992 #endif
2993 }
2994 done:
2995 vm_shared_region_lock();
2996
2997 assert(sr->sr_slide_in_progress);
2998 assert(sr->sr_slid == FALSE);
2999 sr->sr_slide_in_progress = FALSE;
3000 thread_wakeup(&sr->sr_slide_in_progress);
3001
3002 if (error == KERN_SUCCESS) {
3003 sr->sr_slid = TRUE;
3004
3005 /*
3006 * We don't know how to tear down a slid shared region today, because
3007 * we would have to invalidate all the pages that have been slid
3008 * atomically with respect to anyone mapping the shared region afresh.
3009 * Therefore, take a dangling reference to prevent teardown.
3010 */
3011 sr->sr_ref_count++;
3012 #ifndef CONFIG_EMBEDDED
3013 shared_region_completed_slide = TRUE;
3014 #endif
3015 }
3016 vm_shared_region_unlock();
3017
3018 vm_shared_region_deallocate(sr);
3019
3020 SHARED_REGION_TRACE_DEBUG(
3021 ("vm_shared_region_slide: <- %d\n",
3022 error));
3023
3024 return error;
3025 }
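/*
 * A user-space analog of the sr_slide_in_progress handshake in
 * vm_shared_region_slide() above: wait until no slide is in progress, claim the
 * flag, do the heavy work with the lock dropped, then clear the flag and wake
 * any waiters. This uses pthreads purely to illustrate the pattern; the demo_*
 * names are not kernel interfaces.
 */
#include <pthread.h>
#include <stdbool.h>

struct demo_region {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	bool            slide_in_progress;
	bool            slid;
};

static int
demo_slide_once(struct demo_region *r, int (*do_slide)(void))
{
	pthread_mutex_lock(&r->lock);
	while (r->slide_in_progress) {
		pthread_cond_wait(&r->cond, &r->lock);
	}
	if (r->slid) {
		pthread_mutex_unlock(&r->lock);
		return -1;                      /* already slid, refuse */
	}
	r->slide_in_progress = true;
	pthread_mutex_unlock(&r->lock);

	int error = do_slide();                 /* heavy work, lock dropped */

	pthread_mutex_lock(&r->lock);
	r->slide_in_progress = false;
	if (error == 0) {
		r->slid = true;
	}
	pthread_cond_broadcast(&r->cond);
	pthread_mutex_unlock(&r->lock);
	return error;
}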
3026
3027 /*
3028 * This is called from power management code to let the kernel know the current source of power.
3029 * 0 if it is an external source (connected to power)
3030 * 1 if it is an internal power source, i.e. battery
3031 */
3032 void
3033 #if defined(__i386__) || defined(__x86_64__)
3034 post_sys_powersource(int i)
3035 #else
3036 post_sys_powersource(__unused int i)
3037 #endif
3038 {
3039 #if defined(__i386__) || defined(__x86_64__)
3040 post_sys_powersource_internal(i, 0);
3041 #endif /* __i386__ || __x86_64__ */
3042 }
3043
3044
3045 #if defined(__i386__) || defined(__x86_64__)
3046 static void
3047 post_sys_powersource_internal(int i, int internal)
3048 {
3049 if (internal == 0) {
3050 __system_power_source = i;
3051 }
3052
3053 if (__commpage_setup != 0) {
3054 if (__system_power_source != 0) {
3055 commpage_set_spin_count(0);
3056 } else {
3057 commpage_set_spin_count(MP_SPIN_TRIES);
3058 }
3059 }
3060 }
3061 #endif /* __i386__ || __x86_64__ */